/*
 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoaderData.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "gc_implementation/shared/adaptiveSizePolicy.hpp"
#include "gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.hpp"
#include "gc_implementation/concurrentMarkSweep/cmsOopClosures.inline.hpp"
#include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp"
#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp"
#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
#include "gc_implementation/concurrentMarkSweep/vmCMSOperations.hpp"
#include "gc_implementation/parNew/parNewGeneration.hpp"
#include "gc_implementation/shared/collectorCounters.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "gc_implementation/shared/isGCActiveMark.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "memory/allocation.hpp"
#include "memory/cardGeneration.inline.hpp"
#include "memory/cardTableRS.hpp"
#include "memory/collectorPolicy.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/genMarkSweep.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/padded.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/resourceArea.hpp"
#include "memory/tenuredGeneration.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/vmThread.hpp"
#include "services/memoryService.hpp"
#include "services/runtimeService.hpp"

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

// statics
CMSCollector* ConcurrentMarkSweepGeneration::_collector = NULL;
bool CMSCollector::_full_gc_requested = false;
GCCause::Cause CMSCollector::_full_gc_cause = GCCause::_no_gc;

//////////////////////////////////////////////////////////////////
// In support of CMS/VM thread synchronization
//////////////////////////////////////////////////////////////////
// We split use of the CGC_lock into 2 "levels".
// The low-level locking is of the usual CGC_lock monitor. We introduce
// a higher level "token" (hereafter "CMS token") built on top of the
// low level monitor (hereafter "CGC lock").
// The token-passing protocol gives priority to the VM thread. The
// CMS-lock doesn't provide any fairness guarantees, but clients
// should ensure that it is only held for very short, bounded
// durations.
//
// When either of the CMS thread or the VM thread is involved in
// collection operations during which it does not want the other
// thread to interfere, it obtains the CMS token.
//
// If either thread tries to get the token while the other has
// it, that thread waits. However, if the VM thread and CMS thread
// both want the token, then the VM thread gets priority while the
// CMS thread waits. This ensures, for instance, that the "concurrent"
// phases of the CMS thread's work do not block out the VM thread
// for long periods of time as the CMS thread continues to hog
// the token. (See bug 4616232).
//
// The baton-passing functions are, however, controlled by the
// flags _foregroundGCShouldWait and _foregroundGCIsActive,
// and here the low-level CMS lock, not the high level token,
// ensures mutual exclusion.
//
// Two important conditions that we have to satisfy:
// 1. if a thread does a low-level wait on the CMS lock, then it
//    relinquishes the CMS token if it were holding that token
//    when it acquired the low-level CMS lock.
// 2. any low-level notifications on the low-level lock
//    should only be sent when a thread has relinquished the token.
//
// In the absence of either property, we'd have potential deadlock.
//
// We protect each of the CMS (concurrent and sequential) phases
// with the CMS _token_, not the CMS _lock_.
//
// The only code protected by CMS lock is the token acquisition code
// itself, see ConcurrentMarkSweepThread::[de]synchronize(), and the
// baton-passing code.
//
// Unfortunately, I couldn't come up with a good abstraction to factor and
// hide the naked CGC_lock manipulation in the baton-passing code
// further below. That's something we should try to do. Also, the proof
// of correctness of this 2-level locking scheme is far from obvious,
// and potentially quite slippery. We have an uneasy suspicion, for instance,
// that there may be a theoretical possibility of delay/starvation in the
// low-level lock/wait/notify scheme used for the baton-passing because of
// potential interference with the priority scheme embodied in the
// CMS-token-passing protocol. See related comments at a CGC_lock->wait()
// invocation further below and marked with "XXX 20011219YSR".
// Indeed, as we note elsewhere, this may become yet more slippery
// in the presence of multiple CMS and/or multiple VM threads. XXX

class CMSTokenSync: public StackObj {
 private:
  bool _is_cms_thread;
 public:
  CMSTokenSync(bool is_cms_thread):
    _is_cms_thread(is_cms_thread) {
    assert(is_cms_thread == Thread::current()->is_ConcurrentGC_thread(),
           "Incorrect argument to constructor");
    ConcurrentMarkSweepThread::synchronize(_is_cms_thread);
  }

  ~CMSTokenSync() {
    assert(_is_cms_thread ?
             ConcurrentMarkSweepThread::cms_thread_has_cms_token() :
             ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
          "Incorrect state");
    ConcurrentMarkSweepThread::desynchronize(_is_cms_thread);
  }
};

// Convenience class that does a CMSTokenSync, and then acquires
// up to three locks.
class CMSTokenSyncWithLocks: public CMSTokenSync {
 private:
  // Note: locks are acquired in textual declaration order
  // and released in the opposite order
  MutexLockerEx _locker1, _locker2, _locker3;
 public:
  CMSTokenSyncWithLocks(bool is_cms_thread, Mutex* mutex1,
                        Mutex* mutex2 = NULL, Mutex* mutex3 = NULL):
    CMSTokenSync(is_cms_thread),
    _locker1(mutex1, Mutex::_no_safepoint_check_flag),
    _locker2(mutex2, Mutex::_no_safepoint_check_flag),
    _locker3(mutex3, Mutex::_no_safepoint_check_flag)
  { }
};
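
// A minimal usage sketch of the token classes above (illustrative only;
// the phase body and the choice of lock below are hypothetical, not a
// transcription of any particular phase in this file):
//
//   void example_concurrent_phase(CMSCollector* c) {
//     // Take the CMS token, then the lock, without safepoint checks;
//     // both are released in reverse order when "ts" goes out of scope,
//     // and only then is the token relinquished.
//     CMSTokenSyncWithLocks ts(true /* is_cms_thread */, c->bitMapLock());
//     // ... work that must not interleave with the VM thread ...
//   }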


//////////////////////////////////////////////////////////////////
//  Concurrent Mark-Sweep Generation /////////////////////////////
//////////////////////////////////////////////////////////////////

NOT_PRODUCT(CompactibleFreeListSpace* debug_cms_space;)

// This struct contains per-thread things necessary to support parallel
// young-gen collection.
class CMSParGCThreadState: public CHeapObj<mtGC> {
 public:
  CFLS_LAB lab;
  PromotionInfo promo;

  // Constructor.
  CMSParGCThreadState(CompactibleFreeListSpace* cfls) : lab(cfls) {
    promo.setSpace(cfls);
  }
};

ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
     ReservedSpace rs, size_t initial_byte_size, int level,
     CardTableRS* ct, bool use_adaptive_freelists,
     FreeBlockDictionary<FreeChunk>::DictionaryChoice dictionaryChoice) :
  CardGeneration(rs, initial_byte_size, level, ct),
  _dilatation_factor(((double)MinChunkSize)/((double)(CollectedHeap::min_fill_size()))),
  _did_compact(false)
{
  HeapWord* bottom = (HeapWord*) _virtual_space.low();
  HeapWord* end    = (HeapWord*) _virtual_space.high();

  _direct_allocated_words = 0;
  NOT_PRODUCT(
    _numObjectsPromoted = 0;
    _numWordsPromoted = 0;
    _numObjectsAllocated = 0;
    _numWordsAllocated = 0;
  )

  _cmsSpace = new CompactibleFreeListSpace(_bts, MemRegion(bottom, end),
                                           use_adaptive_freelists,
                                           dictionaryChoice);
  NOT_PRODUCT(debug_cms_space = _cmsSpace;)
  if (_cmsSpace == NULL) {
    vm_exit_during_initialization(
      "CompactibleFreeListSpace allocation failure");
  }
  _cmsSpace->_gen = this;

  _gc_stats = new CMSGCStats();

  // Verify the assumption that FreeChunk::_prev and OopDesc::_klass
  // offsets match. The ability to tell free chunks from objects
  // depends on this property.
  debug_only(
    FreeChunk* junk = NULL;
    assert(UseCompressedClassPointers ||
           junk->prev_addr() == (void*)(oop(junk)->klass_addr()),
           "Offset of FreeChunk::_prev within FreeChunk must match"
           "  that of OopDesc::_klass within OopDesc");
  )
  if (CollectedHeap::use_parallel_gc_threads()) {
    typedef CMSParGCThreadState* CMSParGCThreadStatePtr;
    _par_gc_thread_states =
      NEW_C_HEAP_ARRAY(CMSParGCThreadStatePtr, ParallelGCThreads, mtGC);
    if (_par_gc_thread_states == NULL) {
      vm_exit_during_initialization("Could not allocate par gc structs");
    }
    for (uint i = 0; i < ParallelGCThreads; i++) {
      _par_gc_thread_states[i] = new CMSParGCThreadState(cmsSpace());
      if (_par_gc_thread_states[i] == NULL) {
        vm_exit_during_initialization("Could not allocate par gc structs");
      }
    }
  } else {
    _par_gc_thread_states = NULL;
  }
  _incremental_collection_failed = false;
  // The "dilatation_factor" is the expansion that can occur on
  // account of the fact that the minimum object size in the CMS
  // generation may be larger than that in, say, a contiguous young
  // generation.
  // Ideally, in the calculation below, we'd compute the dilatation
  // factor as: MinChunkSize/(promoting_gen's min object size)
  // Since we do not have such a general query interface for the
  // promoting generation, we'll instead just use the minimum
  // object size (which today is a header's worth of space);
  // note that all arithmetic is in units of HeapWords.
  assert(MinChunkSize >= CollectedHeap::min_fill_size(), "just checking");
  assert(_dilatation_factor >= 1.0, "from previous assert");
}


// The field "_initiating_occupancy" represents the occupancy percentage
// at which we trigger a new collection cycle.  Unless explicitly specified
// via CMSInitiatingOccupancyFraction (argument "io" below), it
// is calculated by:
//
//   Let "f" be MinHeapFreeRatio in
//
//    _initiating_occupancy = 100-f +
//                           f * (CMSTriggerRatio/100)
//   where CMSTriggerRatio is the argument "tr" below.
//
// That is, if we assume the heap is at its desired maximum occupancy at the
// end of a collection, we let CMSTriggerRatio of the (purported) free
// space be allocated before initiating a new collection cycle.
//
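// A worked example with illustrative values: with MinHeapFreeRatio f = 40,
// CMSTriggerRatio tr = 80, and no explicit CMSInitiatingOccupancyFraction
// (io < 0), the computation below gives
//   _initiating_occupancy = ((100 - 40) + (80 * 40) / 100) / 100 = 0.92,
// i.e. a new cycle is initiated once the generation is about 92% occupied.
//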
void ConcurrentMarkSweepGeneration::init_initiating_occupancy(intx io, uintx tr) {
  assert(io <= 100 && tr <= 100, "Check the arguments");
  if (io >= 0) {
    _initiating_occupancy = (double)io / 100.0;
  } else {
    _initiating_occupancy = ((100 - MinHeapFreeRatio) +
                             (double)(tr * MinHeapFreeRatio) / 100.0)
                            / 100.0;
  }
}

void ConcurrentMarkSweepGeneration::ref_processor_init() {
  assert(collector() != NULL, "no collector");
  collector()->ref_processor_init();
}

void CMSCollector::ref_processor_init() {
  if (_ref_processor == NULL) {
    // Allocate and initialize a reference processor
    _ref_processor =
      new ReferenceProcessor(_span,                               // span
                             (ParallelGCThreads > 1) && ParallelRefProcEnabled, // mt processing
                             (int) ParallelGCThreads,             // mt processing degree
                             _cmsGen->refs_discovery_is_mt(),     // mt discovery
                             (int) MAX2(ConcGCThreads, ParallelGCThreads), // mt discovery degree
                             _cmsGen->refs_discovery_is_atomic(), // whether discovery is atomic
                             &_is_alive_closure);                 // closure for liveness info
    // Initialize the _ref_processor field of CMSGen
    _cmsGen->set_ref_processor(_ref_processor);
  }
}

AdaptiveSizePolicy* CMSCollector::size_policy() {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  assert(gch->kind() == CollectedHeap::GenCollectedHeap,
    "Wrong type of heap");
  return gch->gen_policy()->size_policy();
}

void ConcurrentMarkSweepGeneration::initialize_performance_counters() {

  const char* gen_name = "old";
  GenCollectorPolicy* gcp = (GenCollectorPolicy*) GenCollectedHeap::heap()->collector_policy();

  // Generation Counters - generation 1, 1 subspace
  _gen_counters = new GenerationCounters(gen_name, 1, 1,
      gcp->min_old_size(), gcp->max_old_size(), &_virtual_space);

  _space_counters = new GSpaceCounters(gen_name, 0,
                                       _virtual_space.reserved_size(),
                                       this, _gen_counters);
}

CMSStats::CMSStats(ConcurrentMarkSweepGeneration* cms_gen, unsigned int alpha):
  _cms_gen(cms_gen)
{
  assert(alpha <= 100, "bad value");
  _saved_alpha = alpha;

  // Initialize the alphas to the bootstrap value of 100.
  _gc0_alpha = _cms_alpha = 100;

  _cms_begin_time.update();
  _cms_end_time.update();

  _gc0_duration = 0.0;
  _gc0_period = 0.0;
  _gc0_promoted = 0;

  _cms_duration = 0.0;
  _cms_period = 0.0;
  _cms_allocated = 0;

  _cms_used_at_gc0_begin = 0;
  _cms_used_at_gc0_end = 0;
  _allow_duty_cycle_reduction = false;
  _valid_bits = 0;
}

double CMSStats::cms_free_adjustment_factor(size_t free) const {
  // TBD: CR 6909490
  return 1.0;
}

void CMSStats::adjust_cms_free_adjustment_factor(bool fail, size_t free) {
}

// If promotion failure handling is on, use the padded average size
// of the promotion for each young generation collection.
double CMSStats::time_until_cms_gen_full() const {
  size_t cms_free = _cms_gen->cmsSpace()->free();
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  size_t expected_promotion = MIN2(gch->get_gen(0)->capacity(),
                                   (size_t) _cms_gen->gc_stats()->avg_promoted()->padded_average());
  if (cms_free > expected_promotion) {
    // Start a cms collection if there isn't enough space to promote
    // for the next minor collection.  Use the padded average as
    // a safety factor.
    cms_free -= expected_promotion;

    // Adjust by the safety factor.
    double cms_free_dbl = (double)cms_free;
    double cms_adjustment = (100.0 - CMSIncrementalSafetyFactor)/100.0;
    // Apply a further correction factor which tries to adjust
    // for recent occurrences of concurrent mode failures.
    cms_adjustment = cms_adjustment * cms_free_adjustment_factor(cms_free);
    cms_free_dbl = cms_free_dbl * cms_adjustment;

    if (PrintGCDetails && Verbose) {
      gclog_or_tty->print_cr("CMSStats::time_until_cms_gen_full: cms_free "
        SIZE_FORMAT " expected_promotion " SIZE_FORMAT,
        cms_free, expected_promotion);
      gclog_or_tty->print_cr("  cms_free_dbl %f cms_consumption_rate %f",
        cms_free_dbl, cms_consumption_rate() + 1.0);
    }
    // Add 1 in case the consumption rate goes to zero.
    return cms_free_dbl / (cms_consumption_rate() + 1.0);
  }
  return 0.0;
}
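
// A worked example with illustrative values: if cms_free is 120M and the
// padded expected promotion is 20M, the remaining 100M is scaled by
// (100 - CMSIncrementalSafetyFactor)/100 (0.9 for the default value of
// 10) and by the correction factor (currently 1.0), giving 90M; with a
// cms_consumption_rate() of 9M/s (plus 1 to guard against a zero rate),
// this predicts roughly 9 seconds until the generation is full.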

// Compare the duration of the cms collection to the
// time remaining before the cms generation is empty.
// Note that the time from the start of the cms collection
// to the start of the cms sweep (less than the total
// duration of the cms collection) could be used instead.
// When that was tried, some applications experienced
// promotion failures early in execution, possibly because
// the averages were not yet accurate enough at that point.
double CMSStats::time_until_cms_start() const {
  // We add "gc0_period" to the "work" calculation
  // below because this query is done (mostly) at the
  // end of a scavenge, so we need to conservatively
  // account for that much possible delay
  // in the query so as to avoid concurrent mode failures
  // due to starting the collection just a wee bit too
  // late.
  double work = cms_duration() + gc0_period();
  double deadline = time_until_cms_gen_full();
  // If a concurrent mode failure occurred recently, we want to be
  // more conservative and halve our expected time_until_cms_gen_full()
  if (work > deadline) {
    if (Verbose && PrintGCDetails) {
      gclog_or_tty->print(
        " CMSCollector: collect because of anticipated promotion "
        "before full %3.7f + %3.7f > %3.7f ", cms_duration(),
        gc0_period(), time_until_cms_gen_full());
    }
    return 0.0;
  }
  return deadline - work;
}
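
// For example (illustrative numbers, consistent with the code above):
// with cms_duration() = 2.0s, gc0_period() = 0.5s and
// time_until_cms_gen_full() = 9.0s, work = 2.5s and deadline = 9.0s, so
// a cycle should start in about 6.5 seconds; had work exceeded the
// deadline, 0.0 would be returned to request an immediate start.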

#ifndef PRODUCT
void CMSStats::print_on(outputStream *st) const {
  st->print(" gc0_alpha=%d,cms_alpha=%d", _gc0_alpha, _cms_alpha);
  st->print(",gc0_dur=%g,gc0_per=%g,gc0_promo=" SIZE_FORMAT,
            gc0_duration(), gc0_period(), gc0_promoted());
  st->print(",cms_dur=%g,cms_per=%g,cms_alloc=" SIZE_FORMAT,
            cms_duration(), cms_period(), cms_allocated());
  st->print(",cms_since_beg=%g,cms_since_end=%g",
            cms_time_since_begin(), cms_time_since_end());
  st->print(",cms_used_beg=" SIZE_FORMAT ",cms_used_end=" SIZE_FORMAT,
            _cms_used_at_gc0_begin, _cms_used_at_gc0_end);

  if (valid()) {
    st->print(",promo_rate=%g,cms_alloc_rate=%g",
              promotion_rate(), cms_allocation_rate());
    st->print(",cms_consumption_rate=%g,time_until_full=%g",
              cms_consumption_rate(), time_until_cms_gen_full());
  }
  st->print(" ");
}
#endif // #ifndef PRODUCT

CMSCollector::CollectorState CMSCollector::_collectorState =
                             CMSCollector::Idling;
bool CMSCollector::_foregroundGCIsActive = false;
bool CMSCollector::_foregroundGCShouldWait = false;

CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
                           CardTableRS*                   ct,
                           ConcurrentMarkSweepPolicy*     cp):
  _cmsGen(cmsGen),
  _ct(ct),
  _ref_processor(NULL),    // will be set later
  _conc_workers(NULL),     // may be set later
  _abort_preclean(false),
  _start_sampling(false),
  _between_prologue_and_epilogue(false),
  _markBitMap(0, Mutex::leaf + 1, "CMS_markBitMap_lock"),
  _modUnionTable((CardTableModRefBS::card_shift - LogHeapWordSize),
                 -1 /* lock-free */, "No_lock" /* dummy */),
  _modUnionClosure(&_modUnionTable),
  _modUnionClosurePar(&_modUnionTable),
  // Adjust my span to cover old (cms) gen
  _span(cmsGen->reserved()),
  // Construct the is_alive_closure with _span & markBitMap
  _is_alive_closure(_span, &_markBitMap),
  _restart_addr(NULL),
  _overflow_list(NULL),
  _stats(cmsGen),
  _eden_chunk_lock(new Mutex(Mutex::leaf + 1, "CMS_eden_chunk_lock", true,
                             // verify that this lock should be acquired with safepoint check.
                             Monitor::_safepoint_check_sometimes)),
  _eden_chunk_array(NULL),     // may be set in ctor body
  _eden_chunk_capacity(0),     // -- ditto --
  _eden_chunk_index(0),        // -- ditto --
  _survivor_plab_array(NULL),  // -- ditto --
  _survivor_chunk_array(NULL), // -- ditto --
  _survivor_chunk_capacity(0), // -- ditto --
  _survivor_chunk_index(0),    // -- ditto --
  _ser_pmc_preclean_ovflw(0),
  _ser_kac_preclean_ovflw(0),
  _ser_pmc_remark_ovflw(0),
  _par_pmc_remark_ovflw(0),
  _ser_kac_ovflw(0),
  _par_kac_ovflw(0),
#ifndef PRODUCT
  _num_par_pushes(0),
#endif
  _collection_count_start(0),
  _verifying(false),
  _verification_mark_bm(0, Mutex::leaf + 1, "CMS_verification_mark_bm_lock"),
  _completed_initialization(false),
  _collector_policy(cp),
  _should_unload_classes(CMSClassUnloadingEnabled),
  _concurrent_cycles_since_last_unload(0),
  _roots_scanning_options(SharedHeap::SO_None),
  _inter_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
  _intra_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
  _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) CMSTracer()),
  _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
  _cms_start_registered(false)
{
  if (ExplicitGCInvokesConcurrentAndUnloadsClasses) {
    ExplicitGCInvokesConcurrent = true;
  }
  // Now expand the span and allocate the collection support structures
  // (MUT, marking bit map etc.) to cover both generations subject to
  // collection.

  // For use by dirty card to oop closures.
  _cmsGen->cmsSpace()->set_collector(this);

  // Allocate MUT and marking bit map
  {
    MutexLockerEx x(_markBitMap.lock(), Mutex::_no_safepoint_check_flag);
    if (!_markBitMap.allocate(_span)) {
      warning("Failed to allocate CMS Bit Map");
      return;
    }
    assert(_markBitMap.covers(_span), "_markBitMap inconsistency?");
  }
  {
    _modUnionTable.allocate(_span);
    assert(_modUnionTable.covers(_span), "_modUnionTable inconsistency?");
  }

  if (!_markStack.allocate(MarkStackSize)) {
    warning("Failed to allocate CMS Marking Stack");
    return;
  }

  // Support for multi-threaded concurrent phases
  if (CMSConcurrentMTEnabled) {
    if (FLAG_IS_DEFAULT(ConcGCThreads)) {
      // just for now
      FLAG_SET_DEFAULT(ConcGCThreads, (ParallelGCThreads + 3)/4);
    }
    if (ConcGCThreads > 1) {
      _conc_workers = new YieldingFlexibleWorkGang("Parallel CMS Threads",
                                 ConcGCThreads, true);
      if (_conc_workers == NULL) {
        warning("GC/CMS: _conc_workers allocation failure: "
              "forcing -CMSConcurrentMTEnabled");
        CMSConcurrentMTEnabled = false;
      } else {
        _conc_workers->initialize_workers();
      }
    } else {
      CMSConcurrentMTEnabled = false;
    }
  }
  if (!CMSConcurrentMTEnabled) {
    ConcGCThreads = 0;
  } else {
    // Turn off CMSCleanOnEnter optimization temporarily for
    // the MT case where it's not fixed yet; see 6178663.
    CMSCleanOnEnter = false;
  }
  assert((_conc_workers != NULL) == (ConcGCThreads > 1),
         "Inconsistency");

  // Parallel task queues; these are shared for the
  // concurrent and stop-world phases of CMS, but
  // are not shared with parallel scavenge (ParNew).
  {
    uint i;
    uint num_queues = (uint) MAX2(ParallelGCThreads, ConcGCThreads);

    if ((CMSParallelRemarkEnabled || CMSConcurrentMTEnabled
         || ParallelRefProcEnabled)
        && num_queues > 0) {
      _task_queues = new OopTaskQueueSet(num_queues);
      if (_task_queues == NULL) {
        warning("task_queues allocation failure.");
        return;
      }
      _hash_seed = NEW_C_HEAP_ARRAY(int, num_queues, mtGC);
      if (_hash_seed == NULL) {
        warning("_hash_seed array allocation failure");
        return;
      }

      typedef Padded<OopTaskQueue> PaddedOopTaskQueue;
      for (i = 0; i < num_queues; i++) {
        PaddedOopTaskQueue *q = new PaddedOopTaskQueue();
        if (q == NULL) {
          warning("work_queue allocation failure.");
          return;
        }
        _task_queues->register_queue(i, q);
      }
      for (i = 0; i < num_queues; i++) {
        _task_queues->queue(i)->initialize();
        _hash_seed[i] = 17;  // copied from ParNew
      }
    }
  }

  _cmsGen->init_initiating_occupancy(CMSInitiatingOccupancyFraction, CMSTriggerRatio);

  // Clip CMSBootstrapOccupancy between 0 and 100.
  _bootstrap_occupancy = ((double)CMSBootstrapOccupancy)/(double)100;

  // Now tell CMS generations the identity of their collector
  ConcurrentMarkSweepGeneration::set_collector(this);

  // Create & start a CMS thread for this CMS collector
  _cmsThread = ConcurrentMarkSweepThread::start(this);
  assert(cmsThread() != NULL, "CMS Thread should have been created");
  assert(cmsThread()->collector() == this,
         "CMS Thread should refer to this gen");
  assert(CGC_lock != NULL, "Where's the CGC_lock?");

  // Support for parallelizing young gen rescan
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  assert(gch->prev_gen(_cmsGen)->kind() == Generation::ParNew, "CMS can only be used with ParNew");
  _young_gen = (ParNewGeneration*)gch->prev_gen(_cmsGen);
  if (gch->supports_inline_contig_alloc()) {
    _top_addr = gch->top_addr();
    _end_addr = gch->end_addr();
    assert(_young_gen != NULL, "no _young_gen");
    _eden_chunk_index = 0;
    _eden_chunk_capacity = (_young_gen->max_capacity()+CMSSamplingGrain)/CMSSamplingGrain;
    _eden_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, _eden_chunk_capacity, mtGC);
    if (_eden_chunk_array == NULL) {
      _eden_chunk_capacity = 0;
      warning("GC/CMS: _eden_chunk_array allocation failure");
    }
  }
  assert(_eden_chunk_array != NULL || _eden_chunk_capacity == 0, "Error");

  // Support for parallelizing survivor space rescan
  if ((CMSParallelRemarkEnabled && CMSParallelSurvivorRemarkEnabled) || CMSParallelInitialMarkEnabled) {
    const size_t max_plab_samples =
      ((DefNewGeneration*)_young_gen)->max_survivor_size()/MinTLABSize;

    _survivor_plab_array  = NEW_C_HEAP_ARRAY(ChunkArray, ParallelGCThreads, mtGC);
    _survivor_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, 2*max_plab_samples, mtGC);
    _cursor               = NEW_C_HEAP_ARRAY(size_t, ParallelGCThreads, mtGC);
    if (_survivor_plab_array == NULL || _survivor_chunk_array == NULL
        || _cursor == NULL) {
      warning("Failed to allocate survivor plab/chunk array");
      if (_survivor_plab_array  != NULL) {
        FREE_C_HEAP_ARRAY(ChunkArray, _survivor_plab_array);
        _survivor_plab_array = NULL;
      }
      if (_survivor_chunk_array != NULL) {
        FREE_C_HEAP_ARRAY(HeapWord*, _survivor_chunk_array);
        _survivor_chunk_array = NULL;
      }
      if (_cursor != NULL) {
        FREE_C_HEAP_ARRAY(size_t, _cursor);
        _cursor = NULL;
      }
    } else {
      _survivor_chunk_capacity = 2*max_plab_samples;
      for (uint i = 0; i < ParallelGCThreads; i++) {
        HeapWord** vec = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples, mtGC);
        if (vec == NULL) {
          warning("Failed to allocate survivor plab array");
          for (int j = i; j > 0; j--) {
            FREE_C_HEAP_ARRAY(HeapWord*, _survivor_plab_array[j-1].array());
          }
          FREE_C_HEAP_ARRAY(ChunkArray, _survivor_plab_array);
          FREE_C_HEAP_ARRAY(HeapWord*, _survivor_chunk_array);
          _survivor_plab_array = NULL;
          _survivor_chunk_array = NULL;
          _survivor_chunk_capacity = 0;
          break;
        } else {
          ChunkArray* cur =
            ::new (&_survivor_plab_array[i]) ChunkArray(vec,
                                                        max_plab_samples);
          assert(cur->end() == 0, "Should be 0");
          assert(cur->array() == vec, "Should be vec");
          assert(cur->capacity() == max_plab_samples, "Error");
        }
      }
    }
  }
  assert(   (   _survivor_plab_array  != NULL
             && _survivor_chunk_array != NULL)
         || (   _survivor_chunk_capacity == 0
             && _survivor_chunk_index == 0),
         "Error");

  NOT_PRODUCT(_overflow_counter = CMSMarkStackOverflowInterval;)
  _gc_counters = new CollectorCounters("CMS", 1);
  _completed_initialization = true;
  _inter_sweep_timer.start();  // start of time
}

const char* ConcurrentMarkSweepGeneration::name() const {
  return "concurrent mark-sweep generation";
}

void ConcurrentMarkSweepGeneration::update_counters() {
  if (UsePerfData) {
    _space_counters->update_all();
    _gen_counters->update_all();
  }
}

// This is an optimized version of update_counters(). It takes the
// used value as a parameter rather than computing it.
//
void ConcurrentMarkSweepGeneration::update_counters(size_t used) {
  if (UsePerfData) {
    _space_counters->update_used(used);
    _space_counters->update_capacity();
    _gen_counters->update_all();
  }
}

void ConcurrentMarkSweepGeneration::print() const {
  Generation::print();
  cmsSpace()->print();
}

#ifndef PRODUCT
void ConcurrentMarkSweepGeneration::print_statistics() {
  cmsSpace()->printFLCensus(0);
}
#endif

void ConcurrentMarkSweepGeneration::printOccupancy(const char *s) {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  if (PrintGCDetails) {
    if (Verbose) {
      gclog_or_tty->print("[%d %s-%s: "SIZE_FORMAT"("SIZE_FORMAT")]",
        level(), short_name(), s, used(), capacity());
    } else {
      gclog_or_tty->print("[%d %s-%s: "SIZE_FORMAT"K("SIZE_FORMAT"K)]",
        level(), short_name(), s, used() / K, capacity() / K);
    }
  }
  if (Verbose) {
    gclog_or_tty->print(" "SIZE_FORMAT"("SIZE_FORMAT")",
              gch->used(), gch->capacity());
  } else {
    gclog_or_tty->print(" "SIZE_FORMAT"K("SIZE_FORMAT"K)",
              gch->used() / K, gch->capacity() / K);
  }
}

size_t
ConcurrentMarkSweepGeneration::contiguous_available() const {
  // dld proposes an improvement in precision here. If the committed
  // part of the space ends in a free block we should add that to
  // uncommitted size in the calculation below. Will make this
  // change later, staying with the approximation below for the
  // time being. -- ysr.
  return MAX2(_virtual_space.uncommitted_size(), unsafe_max_alloc_nogc());
}

size_t
ConcurrentMarkSweepGeneration::unsafe_max_alloc_nogc() const {
  return _cmsSpace->max_alloc_in_words() * HeapWordSize;
}

size_t ConcurrentMarkSweepGeneration::max_available() const {
  return free() + _virtual_space.uncommitted_size();
}

bool ConcurrentMarkSweepGeneration::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
  size_t available = max_available();
  size_t av_promo  = (size_t)gc_stats()->avg_promoted()->padded_average();
  bool   res = (available >= av_promo) || (available >= max_promotion_in_bytes);
  if (Verbose && PrintGCDetails) {
    gclog_or_tty->print_cr(
      "CMS: promo attempt is%s safe: available("SIZE_FORMAT") %s av_promo("SIZE_FORMAT"),"
      "max_promo("SIZE_FORMAT")",
      res? "":" not", available, res? ">=":"<",
      av_promo, max_promotion_in_bytes);
  }
  return res;
}

// At a promotion failure dump information on block layout in heap
// (cms old generation).
void ConcurrentMarkSweepGeneration::promotion_failure_occurred() {
  if (CMSDumpAtPromotionFailure) {
    cmsSpace()->dump_at_safepoint_with_locks(collector(), gclog_or_tty);
  }
}

void ConcurrentMarkSweepGeneration::reset_after_compaction() {
  // Clear the promotion information.  These pointers can be adjusted
  // along with all the other pointers into the heap, but compaction is
  // expected to be a rare event for a heap using CMS, so don't do it
  // without seeing the need.
  for (uint i = 0; i < ParallelGCThreads; i++) {
    _par_gc_thread_states[i]->promo.reset();
  }
}

void ConcurrentMarkSweepGeneration::compute_new_size() {
  assert_locked_or_safepoint(Heap_lock);

  // If incremental collection failed, we just want to expand
  // to the limit.
  if (incremental_collection_failed()) {
    clear_incremental_collection_failed();
    grow_to_reserved();
    return;
  }

  // The heap has been compacted but not reset yet.
  // Any metric such as free() or used() will be incorrect.

  CardGeneration::compute_new_size();

  // Reset again after a possible resizing
  if (did_compact()) {
    cmsSpace()->reset_after_compaction();
  }
}

void ConcurrentMarkSweepGeneration::compute_new_size_free_list() {
  assert_locked_or_safepoint(Heap_lock);

  // If incremental collection failed, we just want to expand
  // to the limit.
  if (incremental_collection_failed()) {
    clear_incremental_collection_failed();
    grow_to_reserved();
    return;
  }

  double free_percentage = ((double) free()) / capacity();
  double desired_free_percentage = (double) MinHeapFreeRatio / 100;
  double maximum_free_percentage = (double) MaxHeapFreeRatio / 100;

  // compute expansion delta needed for reaching desired free percentage
  if (free_percentage < desired_free_percentage) {
    size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
    assert(desired_capacity >= capacity(), "invalid expansion size");
    size_t expand_bytes = MAX2(desired_capacity - capacity(), MinHeapDeltaBytes);
    if (PrintGCDetails && Verbose) {
      size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
      gclog_or_tty->print_cr("\nFrom compute_new_size: ");
      gclog_or_tty->print_cr("  Free fraction %f", free_percentage);
      gclog_or_tty->print_cr("  Desired free fraction %f",
        desired_free_percentage);
      gclog_or_tty->print_cr("  Maximum free fraction %f",
        maximum_free_percentage);
      gclog_or_tty->print_cr("  Capacity "SIZE_FORMAT, capacity()/1000);
      gclog_or_tty->print_cr("  Desired capacity "SIZE_FORMAT,
        desired_capacity/1000);
      int prev_level = level() - 1;
      if (prev_level >= 0) {
        size_t prev_size = 0;
        GenCollectedHeap* gch = GenCollectedHeap::heap();
        Generation* prev_gen = gch->get_gen(prev_level);
        prev_size = prev_gen->capacity();
        gclog_or_tty->print_cr("  Younger gen size "SIZE_FORMAT,
                               prev_size/1000);
      }
      gclog_or_tty->print_cr("  unsafe_max_alloc_nogc "SIZE_FORMAT,
        unsafe_max_alloc_nogc()/1000);
      gclog_or_tty->print_cr("  contiguous available "SIZE_FORMAT,
        contiguous_available()/1000);
      gclog_or_tty->print_cr("  Expand by "SIZE_FORMAT" (bytes)",
        expand_bytes);
    }
    // safe if expansion fails
    expand_for_gc_cause(expand_bytes, 0, CMSExpansionCause::_satisfy_free_ratio);
    if (PrintGCDetails && Verbose) {
      gclog_or_tty->print_cr("  Expanded free fraction %f",
        ((double) free()) / capacity());
    }
  } else {
    size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
    assert(desired_capacity <= capacity(), "invalid expansion size");
    size_t shrink_bytes = capacity() - desired_capacity;
    // Don't shrink unless the delta is greater than the minimum shrink we want
    if (shrink_bytes >= MinHeapDeltaBytes) {
      shrink_free_list_by(shrink_bytes);
    }
  }
}
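
// A worked example with illustrative values: with used() = 600M,
// capacity() = 800M and MinHeapFreeRatio = 40, the free fraction is
// 200M/800M = 0.25, below the desired 0.40, so the method above computes
//   desired_capacity = 600M / (1 - 0.40) = 1000M
// and requests an expansion of MAX2(1000M - 800M, MinHeapDeltaBytes) = 200M.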

Mutex* ConcurrentMarkSweepGeneration::freelistLock() const {
  return cmsSpace()->freelistLock();
}

HeapWord* ConcurrentMarkSweepGeneration::allocate(size_t size,
                                                  bool   tlab) {
  CMSSynchronousYieldRequest yr;
  MutexLockerEx x(freelistLock(),
                  Mutex::_no_safepoint_check_flag);
  return have_lock_and_allocate(size, tlab);
}

HeapWord* ConcurrentMarkSweepGeneration::have_lock_and_allocate(size_t size,
                                                  bool   tlab /* ignored */) {
  assert_lock_strong(freelistLock());
  size_t adjustedSize = CompactibleFreeListSpace::adjustObjectSize(size);
  HeapWord* res = cmsSpace()->allocate(adjustedSize);
  // Allocate the object live (grey) if the background collector has
  // started marking. This is necessary because the marker may
  // have passed this address and consequently this object will
  // not otherwise be greyed and would be incorrectly swept up.
  // Note that if this object contains references, the writing
  // of those references will dirty the card containing this object
  // allowing the object to be blackened (and its references scanned)
  // either during a preclean phase or at the final checkpoint.
  if (res != NULL) {
    // We may block here with an uninitialized object with
    // its mark-bit or P-bits not yet set. Such objects need
    // to be safely navigable by block_start().
    assert(oop(res)->klass_or_null() == NULL, "Object should be uninitialized here.");
    assert(!((FreeChunk*)res)->is_free(), "Error, block will look free but show wrong size");
    collector()->direct_allocated(res, adjustedSize);
    _direct_allocated_words += adjustedSize;
    // allocation counters
    NOT_PRODUCT(
      _numObjectsAllocated++;
      _numWordsAllocated += (int)adjustedSize;
    )
  }
  return res;
}

// In the case of direct allocation by mutators in a generation that
// is being concurrently collected, the object must be allocated
// live (grey) if the background collector has started marking.
// This is necessary because the marker may
// have passed this address and consequently this object will
// not otherwise be greyed and would be incorrectly swept up.
// Note that if this object contains references, the writing
// of those references will dirty the card containing this object
// allowing the object to be blackened (and its references scanned)
// either during a preclean phase or at the final checkpoint.
void CMSCollector::direct_allocated(HeapWord* start, size_t size) {
  assert(_markBitMap.covers(start, size), "Out of bounds");
  if (_collectorState >= Marking) {
    MutexLockerEx y(_markBitMap.lock(),
                    Mutex::_no_safepoint_check_flag);
    // [see comments preceding SweepClosure::do_blk() below for details]
    //
    // Can the P-bits be deleted now?  JJJ
    //
    // 1. need to mark the object as live so it isn't collected
    // 2. need to mark the 2nd bit to indicate the object may be uninitialized
    // 3. need to mark the end of the object so marking, precleaning or sweeping
    //    can skip over uninitialized or unparsable objects. An allocated
    //    object is considered uninitialized for our purposes as long as
    //    its klass word is NULL.  All old gen objects are parsable
    //    as soon as they are initialized.
    _markBitMap.mark(start);          // object is live
    _markBitMap.mark(start + 1);      // object is potentially uninitialized?
    _markBitMap.mark(start + size - 1);
                                      // mark end of object
  }
  // check that oop looks uninitialized
  assert(oop(start)->klass_or_null() == NULL, "_klass should be NULL");
}

void CMSCollector::promoted(bool par, HeapWord* start,
                            bool is_obj_array, size_t obj_size) {
  assert(_markBitMap.covers(start), "Out of bounds");
  // See comment in direct_allocated() about when objects should
  // be allocated live.
  if (_collectorState >= Marking) {
    // we already hold the marking bit map lock, taken in
    // the prologue
    if (par) {
      _markBitMap.par_mark(start);
    } else {
      _markBitMap.mark(start);
    }
    // We don't need to mark the object as uninitialized (as
    // in direct_allocated above) because this is being done with the
    // world stopped and the object will be initialized by the
    // time the marking, precleaning or sweeping get to look at it.
    // But see the code for copying objects into the CMS generation,
    // where we need to ensure that concurrent readers of the
    // block offset table are able to safely navigate a block that
    // is in flux from being free to being allocated (and in
    // transition while being copied into) and subsequently
    // becoming a bona-fide object when the copy/promotion is complete.
    assert(SafepointSynchronize::is_at_safepoint(),
           "expect promotion only at safepoints");

    if (_collectorState < Sweeping) {
      // Mark the appropriate cards in the modUnionTable, so that
      // this object gets scanned before the sweep. If this is
      // not done, CMS generation references in the object might
      // not get marked.
      // For the case of arrays, which are otherwise precisely
      // marked, we need to dirty the entire array, not just its head.
      if (is_obj_array) {
        // The [par_]mark_range() method expects mr.end() below to
        // be aligned to the granularity of a bit's representation
        // in the heap. In the case of the MUT below, that's a
        // card size.
        MemRegion mr(start,
                     (HeapWord*)round_to((intptr_t)(start + obj_size),
                        CardTableModRefBS::card_size /* bytes */));
        if (par) {
          _modUnionTable.par_mark_range(mr);
        } else {
          _modUnionTable.mark_range(mr);
        }
      } else {  // not an obj array; we can just mark the head
        if (par) {
          _modUnionTable.par_mark(start);
        } else {
          _modUnionTable.mark(start);
        }
      }
    }
  }
}
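
// A worked example for the dirtying above (illustrative, assuming the
// usual 512-byte card size): an object array promoted to the range
// [start, start + obj_size) has its modUnionTable range rounded up so
// that mr.end() lands on the next card boundary past start + obj_size,
// since the MUT can only represent dirtiness at card granularity.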

oop ConcurrentMarkSweepGeneration::promote(oop obj, size_t obj_size) {
  assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
  // allocate, copy and if necessary update promoinfo --
  // delegate to underlying space.
  assert_lock_strong(freelistLock());

#ifndef PRODUCT
  if (Universe::heap()->promotion_should_fail()) {
    return NULL;
  }
#endif  // #ifndef PRODUCT

  oop res = _cmsSpace->promote(obj, obj_size);
  if (res == NULL) {
    // expand and retry
    size_t s = _cmsSpace->expansionSpaceRequired(obj_size);  // HeapWords
    expand_for_gc_cause(s*HeapWordSize, MinHeapDeltaBytes, CMSExpansionCause::_satisfy_promotion);
    // Since there's currently no next generation, we don't try to promote
    // into a more senior generation.
    assert(next_gen() == NULL, "assumption, based upon which no attempt "
                               "is made to pass on a possibly failing "
                               "promotion to next generation");
    res = _cmsSpace->promote(obj, obj_size);
  }
  if (res != NULL) {
    // See comment in allocate() about when objects should
    // be allocated live.
    assert(obj->is_oop(), "Will dereference klass pointer below");
    collector()->promoted(false,           // Not parallel
                          (HeapWord*)res, obj->is_objArray(), obj_size);
    // promotion counters
    NOT_PRODUCT(
      _numObjectsPromoted++;
      _numWordsPromoted +=
        (int)(CompactibleFreeListSpace::adjustObjectSize(obj->size()));
    )
  }
  return res;
}


// IMPORTANT: Notes on object size recognition in CMS.
// ---------------------------------------------------
// A block of storage in the CMS generation is always in
// one of three states. A free block (FREE), an allocated
// object (OBJECT) whose size() method reports the correct size,
// and an intermediate state (TRANSIENT) in which its size cannot
// be accurately determined.
// STATE IDENTIFICATION:   (32 bit and 64 bit w/o COOPS)
// -----------------------------------------------------
// FREE:      klass_word & 1 == 1; mark_word holds block size
//
// OBJECT:    klass_word installed; klass_word != 0 && klass_word & 1 == 0;
//            obj->size() computes correct size
//
// TRANSIENT: klass_word == 0; size is indeterminate until we become an OBJECT
//
// STATE IDENTIFICATION: (64 bit+COOPS)
// ------------------------------------
// FREE:      mark_word & CMS_FREE_BIT == 1; mark_word & ~CMS_FREE_BIT gives block_size
//
// OBJECT:    klass_word installed; klass_word != 0;
//            obj->size() computes correct size
//
// TRANSIENT: klass_word == 0; size is indeterminate until we become an OBJECT
//
//
// STATE TRANSITION DIAGRAM
//
//        mut / parnew                     mut  /  parnew
// FREE --------------------> TRANSIENT ---------------------> OBJECT --|
//  ^                                                                   |
//  |------------------------ DEAD <------------------------------------|
//         sweep                            mut
//
// While a block is in TRANSIENT state its size cannot be determined
// so readers will either need to come back later or stall until
// the size can be determined. Note that for the case of direct
// allocation, P-bits, when available, may be used to determine the
// size of an object that may not yet have been initialized.
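//
// A minimal sketch of the reader-side logic implied by the state tables
// above (illustrative pseudo-accessors, not actual HotSpot API; the
// 32-bit / 64-bit-without-COOPS case):
//
//   if (klass_word(p) & 1) {           // FREE: block size is in mark word
//     size = ((FreeChunk*)p)->size();
//   } else if (klass_word(p) != 0) {   // OBJECT: header fully installed
//     size = oop(p)->size();
//   } else {                           // TRANSIENT: come back later, or
//     // stall until the size can be determined (P-bits may help for
//     // directly allocated objects).
//   }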

// Things to support parallel young-gen collection.
oop
ConcurrentMarkSweepGeneration::par_promote(int thread_num,
                                           oop old, markOop m,
                                           size_t word_sz) {
#ifndef PRODUCT
  if (Universe::heap()->promotion_should_fail()) {
    return NULL;
  }
#endif  // #ifndef PRODUCT

  CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
  PromotionInfo* promoInfo = &ps->promo;
  // if we are tracking promotions, then first ensure space for
  // promotion (including spooling space for saving header if necessary).
  // then allocate and copy, then track promoted info if needed.
  // When tracking (see PromotionInfo::track()), the mark word may
  // be displaced and in this case restoration of the mark word
  // occurs in the (oop_since_save_marks_)iterate phase.
  if (promoInfo->tracking() && !promoInfo->ensure_spooling_space()) {
    // Out of space for allocating spooling buffers;
    // try expanding and allocating spooling buffers.
    if (!expand_and_ensure_spooling_space(promoInfo)) {
      return NULL;
    }
  }
  assert(promoInfo->has_spooling_space(), "Control point invariant");
  const size_t alloc_sz = CompactibleFreeListSpace::adjustObjectSize(word_sz);
  HeapWord* obj_ptr = ps->lab.alloc(alloc_sz);
  if (obj_ptr == NULL) {
     obj_ptr = expand_and_par_lab_allocate(ps, alloc_sz);
     if (obj_ptr == NULL) {
       return NULL;
     }
  }
  oop obj = oop(obj_ptr);
  OrderAccess::storestore();
  assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
  assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
  // IMPORTANT: See note on object initialization for CMS above.
  // Otherwise, copy the object.  Here we must be careful to insert the
  // klass pointer last, since this marks the block as an allocated object.
  // Except with compressed oops it's the mark word.
  HeapWord* old_ptr = (HeapWord*)old;
  // Restore the mark word copied above.
  obj->set_mark(m);
  assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
  assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
  OrderAccess::storestore();

  if (UseCompressedClassPointers) {
    // Copy gap missed by (aligned) header size calculation below
    obj->set_klass_gap(old->klass_gap());
  }
  if (word_sz > (size_t)oopDesc::header_size()) {
    Copy::aligned_disjoint_words(old_ptr + oopDesc::header_size(),
                                 obj_ptr + oopDesc::header_size(),
                                 word_sz - oopDesc::header_size());
  }

  // Now we can track the promoted object, if necessary.  We take care
  // to delay the transition from uninitialized to full object
  // (i.e., insertion of klass pointer) until after, so that it
  // atomically becomes a promoted object.
  if (promoInfo->tracking()) {
    promoInfo->track((PromotedObject*)obj, old->klass());
  }
  assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
  assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
  assert(old->is_oop(), "Will use and dereference old klass ptr below");

  // Finally, install the klass pointer (this should be volatile).
  OrderAccess::storestore();
  obj->set_klass(old->klass());
  // We should now be able to calculate the right size for this object
  assert(obj->is_oop() && obj->size() == (int)word_sz, "Error, incorrect size computed for promoted object");

  collector()->promoted(true,          // parallel
                        obj_ptr, old->is_objArray(), word_sz);

  NOT_PRODUCT(
    Atomic::inc_ptr(&_numObjectsPromoted);
    Atomic::add_ptr(alloc_sz, &_numWordsPromoted);
  )

  return obj;
}

void
ConcurrentMarkSweepGeneration::
par_promote_alloc_done(int thread_num) {
  CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
  ps->lab.retire(thread_num);
}

void
ConcurrentMarkSweepGeneration::
par_oop_since_save_marks_iterate_done(int thread_num) {
  CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
  ParScanWithoutBarrierClosure* dummy_cl = NULL;
  ps->promo.promoted_oops_iterate_nv(dummy_cl);
}

bool ConcurrentMarkSweepGeneration::should_collect(bool   full,
                                                   size_t size,
                                                   bool   tlab)
{
  // We allow a STW collection only if a full
  // collection was requested.
  return full || should_allocate(size, tlab); // FIX ME !!!
  // This and promotion failure handling are connected at the
  // hip and should be fixed by untying them.
}

bool CMSCollector::shouldConcurrentCollect() {
  if (_full_gc_requested) {
    if (Verbose && PrintGCDetails) {
      gclog_or_tty->print_cr("CMSCollector: collect because of explicit "
                             "gc request (or gc_locker)");
    }
    return true;
  }

  FreelistLocker x(this);
  // ------------------------------------------------------------------
  // Print out lots of information which affects the initiation of
  // a collection.
  if (PrintCMSInitiationStatistics && stats().valid()) {
    gclog_or_tty->print("CMSCollector shouldConcurrentCollect: ");
    gclog_or_tty->stamp();
    gclog_or_tty->cr();
    stats().print_on(gclog_or_tty);
    gclog_or_tty->print_cr("time_until_cms_gen_full %3.7f",
      stats().time_until_cms_gen_full());
    gclog_or_tty->print_cr("free="SIZE_FORMAT, _cmsGen->free());
    gclog_or_tty->print_cr("contiguous_available="SIZE_FORMAT,
                           _cmsGen->contiguous_available());
    gclog_or_tty->print_cr("promotion_rate=%g", stats().promotion_rate());
    gclog_or_tty->print_cr("cms_allocation_rate=%g", stats().cms_allocation_rate());
    gclog_or_tty->print_cr("occupancy=%3.7f", _cmsGen->occupancy());
    gclog_or_tty->print_cr("initiatingOccupancy=%3.7f", _cmsGen->initiating_occupancy());
    gclog_or_tty->print_cr("cms_time_since_begin=%3.7f", stats().cms_time_since_begin());
    gclog_or_tty->print_cr("cms_time_since_end=%3.7f", stats().cms_time_since_end());
    gclog_or_tty->print_cr("metadata initialized %d",
      MetaspaceGC::should_concurrent_collect());
  }
  // ------------------------------------------------------------------

  // If the estimated time to complete a cms collection (cms_duration())
  // is less than the estimated time remaining until the cms generation
  // is full, start a collection.
  if (!UseCMSInitiatingOccupancyOnly) {
    if (stats().valid()) {
      if (stats().time_until_cms_start() == 0.0) {
        return true;
      }
    } else {
      // We want to conservatively collect somewhat early in order
      // to try and "bootstrap" our CMS/promotion statistics;
      // this branch will not fire after the first successful CMS
      // collection because the stats should then be valid.
      if (_cmsGen->occupancy() >= _bootstrap_occupancy) {
        if (Verbose && PrintGCDetails) {
          gclog_or_tty->print_cr(
            " CMSCollector: collect for bootstrapping statistics:"
            " occupancy = %f, boot occupancy = %f", _cmsGen->occupancy(),
            _bootstrap_occupancy);
        }
        return true;
      }
    }
  }

  // Otherwise, we start a collection cycle if the
  // old gen wants a collection cycle started, using
1285   // an appropriate criterion for making this decision.
1286   // XXX We need to make sure that the gen expansion
1287   // criterion dovetails well with this. XXX NEED TO FIX THIS
1288   if (_cmsGen->should_concurrent_collect()) {
1289     if (Verbose && PrintGCDetails) {
1290       gclog_or_tty->print_cr("CMS old gen initiated");
1291     }
1292     return true;
1293   }
1294 
1295   // We start a collection if we believe an incremental collection may fail;
1296   // this is not likely to be productive in practice because it's probably too
1297   // late anyway.
1298   GenCollectedHeap* gch = GenCollectedHeap::heap();
1299   assert(gch->collector_policy()->is_generation_policy(),
1300          "You may want to check the correctness of the following");
1301   if (gch->incremental_collection_will_fail(true /* consult_young */)) {
1302     if (Verbose && PrintGCDetails) {
1303       gclog_or_tty->print("CMSCollector: collect because incremental collection will fail ");
1304     }
1305     return true;
1306   }
1307 
1308   if (MetaspaceGC::should_concurrent_collect()) {
1309     if (Verbose && PrintGCDetails) {
1310       gclog_or_tty->print("CMSCollector: collect for metadata allocation ");
1311     }
1312     return true;
1313   }
1314 
1315   // CMSTriggerInterval starts a CMS cycle if enough time has passed.
1316   if (CMSTriggerInterval >= 0) {
1317     if (CMSTriggerInterval == 0) {
1318       // Trigger always
1319       return true;
1320     }
1321 
1322     // Check the CMS time since begin (we do not check the stats validity
1323     // as we want to be able to trigger the first CMS cycle as well)
1324     if (stats().cms_time_since_begin() >= (CMSTriggerInterval / ((double) MILLIUNITS))) {
1325       if (Verbose && PrintGCDetails) {
1326         if (stats().valid()) {
1327           gclog_or_tty->print_cr("CMSCollector: collect because of trigger interval (time since last begin %3.7f secs)",
1328                                  stats().cms_time_since_begin());
1329         } else {
1330           gclog_or_tty->print_cr("CMSCollector: collect because of trigger interval (first collection)");
1331         }
1332       }
1333       return true;
1334     }
1335   }
1336 
1337   return false;
1338 }
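
// Summary of the checks above, in the order consulted (the first that
// fires starts a concurrent cycle):
//   1. an explicit full gc request (_full_gc_requested);
//   2. unless UseCMSInitiatingOccupancyOnly: the statistical predictor,
//      or the bootstrap occupancy threshold while the stats are invalid;
//   3. the old generation's own criteria (should_concurrent_collect());
//   4. an incremental (young) collection is expected to fail;
//   5. metaspace pressure (MetaspaceGC::should_concurrent_collect());
//   6. the CMSTriggerInterval timeout.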
1339 
1340 void CMSCollector::set_did_compact(bool v) { _cmsGen->set_did_compact(v); }
1341 
1342 // Clear _expansion_cause fields of constituent generations
1343 void CMSCollector::clear_expansion_cause() {
1344   _cmsGen->clear_expansion_cause();
1345 }
1346 
1347 // We should be conservative in starting a collection cycle.  To
1348 // start too eagerly runs the risk of collecting too often in the
1349 // extreme.  To collect too rarely falls back on full collections,
// which works, even if not optimal in terms of concurrent work.
// As a workaround for collecting too eagerly, use the flag
1352 // UseCMSInitiatingOccupancyOnly.  This also has the advantage of
1353 // giving the user an easily understandable way of controlling the
1354 // collections.
1355 // We want to start a new collection cycle if any of the following
1356 // conditions hold:
1357 // . our current occupancy exceeds the configured initiating occupancy
1358 //   for this generation, or
1359 // . we recently needed to expand this space and have not, since that
1360 //   expansion, done a collection of this generation, or
1361 // . the underlying space believes that it may be a good idea to initiate
1362 //   a concurrent collection (this may be based on criteria such as the
1363 //   following: the space uses linear allocation and linear allocation is
1364 //   going to fail, or there is believed to be excessive fragmentation in
1365 //   the generation, etc... or ...
1366 // [.(currently done by CMSCollector::shouldConcurrentCollect() only for
1367 //   the case of the old generation; see CR 6543076):
1368 //   we may be approaching a point at which allocation requests may fail because
1369 //   we will be out of sufficient free space given allocation rate estimates.]
1370 bool ConcurrentMarkSweepGeneration::should_concurrent_collect() const {
1371 
1372   assert_lock_strong(freelistLock());
1373   if (occupancy() > initiating_occupancy()) {
1374     if (PrintGCDetails && Verbose) {
1375       gclog_or_tty->print(" %s: collect because of occupancy %f / %f  ",
1376         short_name(), occupancy(), initiating_occupancy());
1377     }
1378     return true;
1379   }
1380   if (UseCMSInitiatingOccupancyOnly) {
1381     return false;
1382   }
1383   if (expansion_cause() == CMSExpansionCause::_satisfy_allocation) {
1384     if (PrintGCDetails && Verbose) {
1385       gclog_or_tty->print(" %s: collect because expanded for allocation ",
1386         short_name());
1387     }
1388     return true;
1389   }
1390   if (_cmsSpace->should_concurrent_collect()) {
1391     if (PrintGCDetails && Verbose) {
1392       gclog_or_tty->print(" %s: collect because cmsSpace says so ",
1393         short_name());
1394     }
1395     return true;
1396   }
1397   return false;
1398 }
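
// Worked example (hypothetical numbers): with a 1G old generation of which
// 700M is in use, occupancy() is 0.70; if initiating_occupancy() has been
// configured to 0.65 (e.g. via CMSInitiatingOccupancyFraction=65), the
// occupancy test above fires and a concurrent collection is started.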
1399 
1400 void ConcurrentMarkSweepGeneration::collect(bool   full,
1401                                             bool   clear_all_soft_refs,
1402                                             size_t size,
1403                                             bool   tlab)
1404 {
1405   collector()->collect(full, clear_all_soft_refs, size, tlab);
1406 }
1407 
1408 void CMSCollector::collect(bool   full,
1409                            bool   clear_all_soft_refs,
1410                            size_t size,
1411                            bool   tlab)
1412 {
1413   // The following "if" branch is present for defensive reasons.
1414   // In the current uses of this interface, it can be replaced with:
  // assert(!GC_locker::is_active(), "Can't be called otherwise");
1416   // But I am not placing that assert here to allow future
1417   // generality in invoking this interface.
1418   if (GC_locker::is_active()) {
1419     // A consistency test for GC_locker
1420     assert(GC_locker::needs_gc(), "Should have been set already");
1421     // Skip this foreground collection, instead
1422     // expanding the heap if necessary.
1423     // Need the free list locks for the call to free() in compute_new_size()
1424     compute_new_size();
1425     return;
1426   }
1427   acquire_control_and_collect(full, clear_all_soft_refs);
1428 }
1429 
1430 void CMSCollector::request_full_gc(unsigned int full_gc_count, GCCause::Cause cause) {
1431   GenCollectedHeap* gch = GenCollectedHeap::heap();
1432   unsigned int gc_count = gch->total_full_collections();
1433   if (gc_count == full_gc_count) {
1434     MutexLockerEx y(CGC_lock, Mutex::_no_safepoint_check_flag);
1435     _full_gc_requested = true;
1436     _full_gc_cause = cause;
1437     CGC_lock->notify();   // nudge CMS thread
1438   } else {
1439     assert(gc_count > full_gc_count, "Error: causal loop");
1440   }
1441 }
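
// Illustrative caller pattern (hypothetical, not collector code): the full
// collection count acts as a ticket, so several threads requesting a full
// gc at about the same time coalesce into a single cycle:
//
//   unsigned int count = gch->total_full_collections();
//   ...  // decide that a full gc is needed
//   collector->request_full_gc(count, cause);
//   // if a full collection completed in the interim, gc_count > count
//   // above and the request is dropped as already satisfied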
1442 
1443 bool CMSCollector::is_external_interruption() {
1444   GCCause::Cause cause = GenCollectedHeap::heap()->gc_cause();
1445   return GCCause::is_user_requested_gc(cause) ||
1446          GCCause::is_serviceability_requested_gc(cause);
1447 }
1448 
1449 void CMSCollector::report_concurrent_mode_interruption() {
1450   if (is_external_interruption()) {
1451     if (PrintGCDetails) {
1452       gclog_or_tty->print(" (concurrent mode interrupted)");
1453     }
1454   } else {
1455     if (PrintGCDetails) {
1456       gclog_or_tty->print(" (concurrent mode failure)");
1457     }
1458     _gc_tracer_cm->report_concurrent_mode_failure();
1459   }
1460 }
1461 
1462 
1463 // The foreground and background collectors need to coordinate in order
1464 // to make sure that they do not mutually interfere with CMS collections.
1465 // When a background collection is active,
1466 // the foreground collector may need to take over (preempt) and
1467 // synchronously complete an ongoing collection. Depending on the
1468 // frequency of the background collections and the heap usage
1469 // of the application, this preemption can be seldom or frequent.
1470 // There are only certain
1471 // points in the background collection that the "collection-baton"
1472 // can be passed to the foreground collector.
1473 //
1474 // The foreground collector will wait for the baton before
1475 // starting any part of the collection.  The foreground collector
1476 // will only wait at one location.
1477 //
1478 // The background collector will yield the baton before starting a new
1479 // phase of the collection (e.g., before initial marking, marking from roots,
1480 // precleaning, final re-mark, sweep etc.)  This is normally done at the head
1481 // of the loop which switches the phases. The background collector does some
1482 // of the phases (initial mark, final re-mark) with the world stopped.
1483 // Because of locking involved in stopping the world,
1484 // the foreground collector should not block waiting for the background
1485 // collector when it is doing a stop-the-world phase.  The background
1486 // collector will yield the baton at an additional point just before
1487 // it enters a stop-the-world phase.  Once the world is stopped, the
1488 // background collector checks the phase of the collection.  If the
1489 // phase has not changed, it proceeds with the collection.  If the
1490 // phase has changed, it skips that phase of the collection.  See
1491 // the comments on the use of the Heap_lock in collect_in_background().
1492 //
1493 // Variable used in baton passing.
1494 //   _foregroundGCIsActive - Set to true by the foreground collector when
1495 //      it wants the baton.  The foreground clears it when it has finished
1496 //      the collection.
1497 //   _foregroundGCShouldWait - Set to true by the background collector
//      when it is running.  The foreground collector waits while
1499 //      _foregroundGCShouldWait is true.
1500 //  CGC_lock - monitor used to protect access to the above variables
1501 //      and to notify the foreground and background collectors.
1502 //  _collectorState - current state of the CMS collection.
1503 //
1504 // The foreground collector
1505 //   acquires the CGC_lock
1506 //   sets _foregroundGCIsActive
1507 //   waits on the CGC_lock for _foregroundGCShouldWait to be false
1508 //     various locks acquired in preparation for the collection
1509 //     are released so as not to block the background collector
1510 //     that is in the midst of a collection
1511 //   proceeds with the collection
1512 //   clears _foregroundGCIsActive
1513 //   returns
1514 //
1515 // The background collector in a loop iterating on the phases of the
1516 //      collection
1517 //   acquires the CGC_lock
1518 //   sets _foregroundGCShouldWait
1519 //   if _foregroundGCIsActive is set
//     clears _foregroundGCShouldWait, notifies CGC_lock
//     waits on CGC_lock for _foregroundGCIsActive to become false
1522 //     and exits the loop.
1523 //   otherwise
1524 //     proceed with that phase of the collection
1525 //     if the phase is a stop-the-world phase,
1526 //       yield the baton once more just before enqueueing
1527 //       the stop-world CMS operation (executed by the VM thread).
1528 //   returns after all phases of the collection are done
1529 //
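// A minimal sketch of the handshake described above (illustrative only;
// CMS token management and safepoint details omitted; CGC_lock is the
// same monitor used by the code below):
//
//   // foreground (VM thread), at a safepoint:
//   _foregroundGCIsActive = true;                  // set without CGC_lock
//   {
//     MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
//     CGC_lock->notify();                          // wake the CMS thread
//     while (_foregroundGCShouldWait) {
//       CGC_lock->wait(Mutex::_no_safepoint_check_flag);
//     }
//   }
//   ... do the (preempting) collection ...
//   _foregroundGCIsActive = false;
//
//   // background (CMS thread), at each phase boundary:
//   {
//     MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
//     _foregroundGCShouldWait = true;
//     if (_foregroundGCIsActive) {
//       _foregroundGCShouldWait = false;           // yield the baton
//       CGC_lock->notify();
//       while (_foregroundGCIsActive) {
//         CGC_lock->wait(Mutex::_no_safepoint_check_flag);
//       }
//     }
//   }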
1530 
1531 void CMSCollector::acquire_control_and_collect(bool full,
1532         bool clear_all_soft_refs) {
1533   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
1534   assert(!Thread::current()->is_ConcurrentGC_thread(),
1535          "shouldn't try to acquire control from self!");
1536 
1537   // Start the protocol for acquiring control of the
1538   // collection from the background collector (aka CMS thread).
1539   assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
1540          "VM thread should have CMS token");
1541   // Remember the possibly interrupted state of an ongoing
1542   // concurrent collection
1543   CollectorState first_state = _collectorState;
1544 
1545   // Signal to a possibly ongoing concurrent collection that
1546   // we want to do a foreground collection.
1547   _foregroundGCIsActive = true;
1548 
  // Release locks and wait for a notify from the background collector.
  // Releasing the locks is only necessary for phases that
  // yield to improve the granularity of the collection.
1552   assert_lock_strong(bitMapLock());
1553   // We need to lock the Free list lock for the space that we are
1554   // currently collecting.
1555   assert(haveFreelistLocks(), "Must be holding free list locks");
1556   bitMapLock()->unlock();
1557   releaseFreelistLocks();
1558   {
1559     MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
1560     if (_foregroundGCShouldWait) {
1561       // We are going to be waiting for action for the CMS thread;
1562       // it had better not be gone (for instance at shutdown)!
1563       assert(ConcurrentMarkSweepThread::cmst() != NULL,
1564              "CMS thread must be running");
1565       // Wait here until the background collector gives us the go-ahead
1566       ConcurrentMarkSweepThread::clear_CMS_flag(
1567         ConcurrentMarkSweepThread::CMS_vm_has_token);  // release token
1568       // Get a possibly blocked CMS thread going:
1569       //   Note that we set _foregroundGCIsActive true above,
1570       //   without protection of the CGC_lock.
1571       CGC_lock->notify();
1572       assert(!ConcurrentMarkSweepThread::vm_thread_wants_cms_token(),
1573              "Possible deadlock");
1574       while (_foregroundGCShouldWait) {
1575         // wait for notification
1576         CGC_lock->wait(Mutex::_no_safepoint_check_flag);
1577         // Possibility of delay/starvation here, since CMS token does
        // not know to give priority to the VM thread? Actually, I think
1579         // there wouldn't be any delay/starvation, but the proof of
1580         // that "fact" (?) appears non-trivial. XXX 20011219YSR
1581       }
1582       ConcurrentMarkSweepThread::set_CMS_flag(
1583         ConcurrentMarkSweepThread::CMS_vm_has_token);
1584     }
1585   }
1586   // The CMS_token is already held.  Get back the other locks.
1587   assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
1588          "VM thread should have CMS token");
1589   getFreelistLocks();
1590   bitMapLock()->lock_without_safepoint_check();
1591   if (TraceCMSState) {
1592     gclog_or_tty->print_cr("CMS foreground collector has asked for control "
1593       INTPTR_FORMAT " with first state %d", Thread::current(), first_state);
1594     gclog_or_tty->print_cr("    gets control with state %d", _collectorState);
1595   }
1596 
1597   // Inform cms gen if this was due to partial collection failing.
1598   // The CMS gen may use this fact to determine its expansion policy.
1599   GenCollectedHeap* gch = GenCollectedHeap::heap();
1600   if (gch->incremental_collection_will_fail(false /* don't consult_young */)) {
1601     assert(!_cmsGen->incremental_collection_failed(),
1602            "Should have been noticed, reacted to and cleared");
1603     _cmsGen->set_incremental_collection_failed();
1604   }
1605 
1606   if (first_state > Idling) {
1607     report_concurrent_mode_interruption();
1608   }
1609 
1610   set_did_compact(true);
1611 
1612   // If the collection is being acquired from the background
1613   // collector, there may be references on the discovered
1614   // references lists.  Abandon those references, since some
1615   // of them may have become unreachable after concurrent
1616   // discovery; the STW compacting collector will redo discovery
1617   // more precisely, without being subject to floating garbage.
1618   // Leaving otherwise unreachable references in the discovered
1619   // lists would require special handling.
1620   ref_processor()->disable_discovery();
1621   ref_processor()->abandon_partial_discovery();
1622   ref_processor()->verify_no_references_recorded();
1623 
1624   if (first_state > Idling) {
1625     save_heap_summary();
1626   }
1627 
1628   do_compaction_work(clear_all_soft_refs);
1629 
1630   // Has the GC time limit been exceeded?
1631   size_t max_eden_size = _young_gen->max_capacity() -
1632                          _young_gen->to()->capacity() -
1633                          _young_gen->from()->capacity();
1634   GCCause::Cause gc_cause = gch->gc_cause();
1635   size_policy()->check_gc_overhead_limit(_young_gen->used(),
1636                                          _young_gen->eden()->used(),
1637                                          _cmsGen->max_capacity(),
1638                                          max_eden_size,
1639                                          full,
1640                                          gc_cause,
1641                                          gch->collector_policy());
1642 
1643   // Reset the expansion cause, now that we just completed
1644   // a collection cycle.
1645   clear_expansion_cause();
1646   _foregroundGCIsActive = false;
1647   return;
1648 }
1649 
1650 // Resize the tenured generation
1651 // after obtaining the free list locks for the
1652 // two generations.
1653 void CMSCollector::compute_new_size() {
1654   assert_locked_or_safepoint(Heap_lock);
1655   FreelistLocker z(this);
1656   MetaspaceGC::compute_new_size();
1657   _cmsGen->compute_new_size_free_list();
1658 }
1659 
1660 // A work method used by the foreground collector to do
1661 // a mark-sweep-compact.
1662 void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
1663   GenCollectedHeap* gch = GenCollectedHeap::heap();
1664 
1665   STWGCTimer* gc_timer = GenMarkSweep::gc_timer();
1666   gc_timer->register_gc_start();
1667 
1668   SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer();
1669   gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start());
1670 
1671   GCTraceTime t("CMS:MSC ", PrintGCDetails && Verbose, true, NULL, gc_tracer->gc_id());
1672 
1673   // Temporarily widen the span of the weak reference processing to
1674   // the entire heap.
1675   MemRegion new_span(GenCollectedHeap::heap()->reserved_region());
1676   ReferenceProcessorSpanMutator rp_mut_span(ref_processor(), new_span);
1677   // Temporarily, clear the "is_alive_non_header" field of the
1678   // reference processor.
1679   ReferenceProcessorIsAliveMutator rp_mut_closure(ref_processor(), NULL);
1680   // Temporarily make reference _processing_ single threaded (non-MT).
1681   ReferenceProcessorMTProcMutator rp_mut_mt_processing(ref_processor(), false);
1682   // Temporarily make refs discovery atomic
1683   ReferenceProcessorAtomicMutator rp_mut_atomic(ref_processor(), true);
1684   // Temporarily make reference _discovery_ single threaded (non-MT)
1685   ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false);
1686 
1687   ref_processor()->set_enqueuing_is_done(false);
1688   ref_processor()->enable_discovery();
1689   ref_processor()->setup_policy(clear_all_soft_refs);
1690   // If an asynchronous collection finishes, the _modUnionTable is
  // all clear.  If we are taking over the collection from the asynchronous
  // (background) collector, clear the _modUnionTable.
1693   assert(_collectorState != Idling || _modUnionTable.isAllClear(),
1694     "_modUnionTable should be clear if the baton was not passed");
1695   _modUnionTable.clear_all();
1696   assert(_collectorState != Idling || _ct->klass_rem_set()->mod_union_is_clear(),
    "mod union for klasses should be clear if the baton was not passed");
1698   _ct->klass_rem_set()->clear_mod_union();
1699 
1700   // We must adjust the allocation statistics being maintained
1701   // in the free list space. We do so by reading and clearing
1702   // the sweep timer and updating the block flux rate estimates below.
1703   assert(!_intra_sweep_timer.is_active(), "_intra_sweep_timer should be inactive");
1704   if (_inter_sweep_timer.is_active()) {
1705     _inter_sweep_timer.stop();
1706     // Note that we do not use this sample to update the _inter_sweep_estimate.
1707     _cmsGen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()),
1708                                             _inter_sweep_estimate.padded_average(),
1709                                             _intra_sweep_estimate.padded_average());
1710   }
1711 
1712   GenMarkSweep::invoke_at_safepoint(_cmsGen->level(),
1713     ref_processor(), clear_all_soft_refs);
1714   #ifdef ASSERT
1715     CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();
1716     size_t free_size = cms_space->free();
1717     assert(free_size ==
1718            pointer_delta(cms_space->end(), cms_space->compaction_top())
1719            * HeapWordSize,
1720       "All the free space should be compacted into one chunk at top");
1721     assert(cms_space->dictionary()->total_chunk_size(
1722                                       debug_only(cms_space->freelistLock())) == 0 ||
1723            cms_space->totalSizeInIndexedFreeLists() == 0,
1724       "All the free space should be in a single chunk");
1725     size_t num = cms_space->totalCount();
1726     assert((free_size == 0 && num == 0) ||
1727            (free_size > 0  && (num == 1 || num == 2)),
1728          "There should be at most 2 free chunks after compaction");
1729   #endif // ASSERT
1730   _collectorState = Resetting;
1731   assert(_restart_addr == NULL,
1732          "Should have been NULL'd before baton was passed");
1733   reset(false /* == !concurrent */);
1734   _cmsGen->reset_after_compaction();
1735   _concurrent_cycles_since_last_unload = 0;
1736 
1737   // Clear any data recorded in the PLAB chunk arrays.
1738   if (_survivor_plab_array != NULL) {
1739     reset_survivor_plab_arrays();
1740   }
1741 
1742   // Adjust the per-size allocation stats for the next epoch.
1743   _cmsGen->cmsSpace()->endSweepFLCensus(sweep_count() /* fake */);
1744   // Restart the "inter sweep timer" for the next epoch.
1745   _inter_sweep_timer.reset();
1746   _inter_sweep_timer.start();
1747 
1748   gc_timer->register_gc_end();
1749 
1750   gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
1751 
1752   // For a mark-sweep-compact, compute_new_size() will be called
1753   // in the heap's do_collection() method.
1754 }
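
// The ReferenceProcessor*Mutator objects used above follow the usual RAII
// save-and-restore pattern: the constructor records the processor's current
// setting and installs the temporary one, and the destructor restores the
// original when do_compaction_work() returns.  A minimal sketch of the
// pattern (illustrative; accessor names are approximate):
//
//   class ReferenceProcessorAtomicMutator: StackObj {
//     ReferenceProcessor* _rp;
//     bool                _saved;
//    public:
//     ReferenceProcessorAtomicMutator(ReferenceProcessor* rp, bool v)
//       : _rp(rp), _saved(rp->discovery_is_atomic()) {
//       rp->set_atomic_discovery(v);
//     }
//     ~ReferenceProcessorAtomicMutator() { _rp->set_atomic_discovery(_saved); }
//   };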
1755 
1756 void CMSCollector::print_eden_and_survivor_chunk_arrays() {
1757   ContiguousSpace* eden_space = _young_gen->eden();
1758   ContiguousSpace* from_space = _young_gen->from();
1759   ContiguousSpace* to_space   = _young_gen->to();
1760   // Eden
1761   if (_eden_chunk_array != NULL) {
1762     gclog_or_tty->print_cr("eden " PTR_FORMAT "-" PTR_FORMAT "-" PTR_FORMAT "(" SIZE_FORMAT ")",
1763                            eden_space->bottom(), eden_space->top(),
1764                            eden_space->end(), eden_space->capacity());
1765     gclog_or_tty->print_cr("_eden_chunk_index=" SIZE_FORMAT ", "
1766                            "_eden_chunk_capacity=" SIZE_FORMAT,
1767                            _eden_chunk_index, _eden_chunk_capacity);
1768     for (size_t i = 0; i < _eden_chunk_index; i++) {
1769       gclog_or_tty->print_cr("_eden_chunk_array[" SIZE_FORMAT "]=" PTR_FORMAT,
1770                              i, _eden_chunk_array[i]);
1771     }
1772   }
1773   // Survivor
1774   if (_survivor_chunk_array != NULL) {
1775     gclog_or_tty->print_cr("survivor " PTR_FORMAT "-" PTR_FORMAT "-" PTR_FORMAT "(" SIZE_FORMAT ")",
1776                            from_space->bottom(), from_space->top(),
1777                            from_space->end(), from_space->capacity());
1778     gclog_or_tty->print_cr("_survivor_chunk_index=" SIZE_FORMAT ", "
1779                            "_survivor_chunk_capacity=" SIZE_FORMAT,
1780                            _survivor_chunk_index, _survivor_chunk_capacity);
1781     for (size_t i = 0; i < _survivor_chunk_index; i++) {
1782       gclog_or_tty->print_cr("_survivor_chunk_array[" SIZE_FORMAT "]=" PTR_FORMAT,
1783                              i, _survivor_chunk_array[i]);
1784     }
1785   }
1786 }
1787 
1788 void CMSCollector::getFreelistLocks() const {
1789   // Get locks for all free lists in all generations that this
1790   // collector is responsible for
1791   _cmsGen->freelistLock()->lock_without_safepoint_check();
1792 }
1793 
1794 void CMSCollector::releaseFreelistLocks() const {
1795   // Release locks for all free lists in all generations that this
1796   // collector is responsible for
1797   _cmsGen->freelistLock()->unlock();
1798 }
1799 
1800 bool CMSCollector::haveFreelistLocks() const {
1801   // Check locks for all free lists in all generations that this
1802   // collector is responsible for
1803   assert_lock_strong(_cmsGen->freelistLock());
1804   PRODUCT_ONLY(ShouldNotReachHere());
1805   return true;
1806 }
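
// Note: haveFreelistLocks() is intended for use inside asserts only. The
// PRODUCT_ONLY(ShouldNotReachHere()) above makes any product-build call
// fail fast, since assert_lock_strong() compiles away in product builds
// and the function would otherwise vacuously return true.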
1807 
1808 // A utility class that is used by the CMS collector to
1809 // temporarily "release" the foreground collector from its
1810 // usual obligation to wait for the background collector to
1811 // complete an ongoing phase before proceeding.
1812 class ReleaseForegroundGC: public StackObj {
1813  private:
1814   CMSCollector* _c;
1815  public:
1816   ReleaseForegroundGC(CMSCollector* c) : _c(c) {
1817     assert(_c->_foregroundGCShouldWait, "Else should not need to call");
1818     MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
1819     // allow a potentially blocked foreground collector to proceed
1820     _c->_foregroundGCShouldWait = false;
1821     if (_c->_foregroundGCIsActive) {
1822       CGC_lock->notify();
1823     }
1824     assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
1825            "Possible deadlock");
1826   }
1827 
1828   ~ReleaseForegroundGC() {
1829     assert(!_c->_foregroundGCShouldWait, "Usage protocol violation?");
1830     MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
1831     _c->_foregroundGCShouldWait = true;
1832   }
1833 };
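
// Typical usage (see the InitialMarking and FinalMarking cases in
// collect_in_background() below): the CMS thread scopes a
// ReleaseForegroundGC around work during which a waiting foreground
// collector may safely proceed:
//
//   {
//     ReleaseForegroundGC x(this);  // clears _foregroundGCShouldWait
//     ...                           // work the foreground gc may preempt
//   }                               // destructor re-asserts the wait flag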
1834 
1835 void CMSCollector::collect_in_background(GCCause::Cause cause) {
1836   assert(Thread::current()->is_ConcurrentGC_thread(),
1837     "A CMS asynchronous collection is only allowed on a CMS thread.");
1838 
1839   GenCollectedHeap* gch = GenCollectedHeap::heap();
1840   {
1841     bool safepoint_check = Mutex::_no_safepoint_check_flag;
1842     MutexLockerEx hl(Heap_lock, safepoint_check);
1843     FreelistLocker fll(this);
1844     MutexLockerEx x(CGC_lock, safepoint_check);
1845     if (_foregroundGCIsActive || !UseAsyncConcMarkSweepGC) {
1846       // The foreground collector is active or we're
1847       // not using asynchronous collections.  Skip this
1848       // background collection.
1849       assert(!_foregroundGCShouldWait, "Should be clear");
1850       return;
1851     } else {
1852       assert(_collectorState == Idling, "Should be idling before start.");
1853       _collectorState = InitialMarking;
1854       register_gc_start(cause);
1855       // Reset the expansion cause, now that we are about to begin
1856       // a new cycle.
1857       clear_expansion_cause();
1858 
1859       // Clear the MetaspaceGC flag since a concurrent collection
1860       // is starting but also clear it after the collection.
1861       MetaspaceGC::set_should_concurrent_collect(false);
1862     }
1863     // Decide if we want to enable class unloading as part of the
1864     // ensuing concurrent GC cycle.
1865     update_should_unload_classes();
1866     _full_gc_requested = false;           // acks all outstanding full gc requests
1867     _full_gc_cause = GCCause::_no_gc;
1868     // Signal that we are about to start a collection
1869     gch->increment_total_full_collections();  // ... starting a collection cycle
1870     _collection_count_start = gch->total_full_collections();
1871   }
1872 
1873   // Used for PrintGC
1874   size_t prev_used;
1875   if (PrintGC && Verbose) {
1876     prev_used = _cmsGen->used();
1877   }
1878 
1879   // The change of the collection state is normally done at this level;
1880   // the exceptions are phases that are executed while the world is
1881   // stopped.  For those phases the change of state is done while the
1882   // world is stopped.  For baton passing purposes this allows the
1883   // background collector to finish the phase and change state atomically.
1884   // The foreground collector cannot wait on a phase that is done
1885   // while the world is stopped because the foreground collector already
1886   // has the world stopped and would deadlock.
1887   while (_collectorState != Idling) {
1888     if (TraceCMSState) {
1889       gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " in CMS state %d",
1890         Thread::current(), _collectorState);
1891     }
1892     // The foreground collector
1893     //   holds the Heap_lock throughout its collection.
1894     //   holds the CMS token (but not the lock)
1895     //     except while it is waiting for the background collector to yield.
1896     //
1897     // The foreground collector should be blocked (not for long)
1898     //   if the background collector is about to start a phase
1899     //   executed with world stopped.  If the background
1900     //   collector has already started such a phase, the
1901     //   foreground collector is blocked waiting for the
1902     //   Heap_lock.  The stop-world phases (InitialMarking and FinalMarking)
1903     //   are executed in the VM thread.
1904     //
1905     // The locking order is
1906     //   PendingListLock (PLL)  -- if applicable (FinalMarking)
1907     //   Heap_lock  (both this & PLL locked in VM_CMS_Operation::prologue())
1908     //   CMS token  (claimed in
1909     //                stop_world_and_do() -->
1910     //                  safepoint_synchronize() -->
1911     //                    CMSThread::synchronize())
1912 
1913     {
1914       // Check if the FG collector wants us to yield.
1915       CMSTokenSync x(true); // is cms thread
1916       if (waitForForegroundGC()) {
1917         // We yielded to a foreground GC, nothing more to be
1918         // done this round.
1919         assert(_foregroundGCShouldWait == false, "We set it to false in "
1920                "waitForForegroundGC()");
1921         if (TraceCMSState) {
1922           gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
1923             " exiting collection CMS state %d",
1924             Thread::current(), _collectorState);
1925         }
1926         return;
1927       } else {
1928         // The background collector can run but check to see if the
1929         // foreground collector has done a collection while the
1930         // background collector was waiting to get the CGC_lock
1931         // above.  If yes, break so that _foregroundGCShouldWait
1932         // is cleared before returning.
1933         if (_collectorState == Idling) {
1934           break;
1935         }
1936       }
1937     }
1938 
1939     assert(_foregroundGCShouldWait, "Foreground collector, if active, "
1940       "should be waiting");
1941 
1942     switch (_collectorState) {
1943       case InitialMarking:
1944         {
1945           ReleaseForegroundGC x(this);
1946           stats().record_cms_begin();
1947           VM_CMS_Initial_Mark initial_mark_op(this);
1948           VMThread::execute(&initial_mark_op);
1949         }
1950         // The collector state may be any legal state at this point
1951         // since the background collector may have yielded to the
1952         // foreground collector.
1953         break;
1954       case Marking:
1955         // initial marking in checkpointRootsInitialWork has been completed
1956         if (markFromRoots()) { // we were successful
1957           assert(_collectorState == Precleaning, "Collector state should "
1958             "have changed");
1959         } else {
1960           assert(_foregroundGCIsActive, "Internal state inconsistency");
1961         }
1962         break;
1963       case Precleaning:
1964         // marking from roots in markFromRoots has been completed
1965         preclean();
1966         assert(_collectorState == AbortablePreclean ||
1967                _collectorState == FinalMarking,
1968                "Collector state should have changed");
1969         break;
1970       case AbortablePreclean:
1971         abortable_preclean();
1972         assert(_collectorState == FinalMarking, "Collector state should "
1973           "have changed");
1974         break;
1975       case FinalMarking:
1976         {
1977           ReleaseForegroundGC x(this);
1978 
1979           VM_CMS_Final_Remark final_remark_op(this);
1980           VMThread::execute(&final_remark_op);
1981         }
1982         assert(_foregroundGCShouldWait, "block post-condition");
1983         break;
1984       case Sweeping:
1985         // final marking in checkpointRootsFinal has been completed
1986         sweep();
1987         assert(_collectorState == Resizing, "Collector state change "
1988           "to Resizing must be done under the free_list_lock");
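        // Note: the fall through into the Resizing case is deliberate;
        // sweep() has advanced the state to Resizing (see assert above).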
1989 
1990       case Resizing: {
1991         // Sweeping has been completed...
1992         // At this point the background collection has completed.
1993         // Don't move the call to compute_new_size() down
1994         // into code that might be executed if the background
1995         // collection was preempted.
1996         {
1997           ReleaseForegroundGC x(this);   // unblock FG collection
1998           MutexLockerEx       y(Heap_lock, Mutex::_no_safepoint_check_flag);
1999           CMSTokenSync        z(true);   // not strictly needed.
2000           if (_collectorState == Resizing) {
2001             compute_new_size();
2002             save_heap_summary();
2003             _collectorState = Resetting;
2004           } else {
2005             assert(_collectorState == Idling, "The state should only change"
2006                    " because the foreground collector has finished the collection");
2007           }
2008         }
2009         break;
2010       }
2011       case Resetting:
2012         // CMS heap resizing has been completed
2013         reset(true);
2014         assert(_collectorState == Idling, "Collector state should "
2015           "have changed");
2016 
2017         MetaspaceGC::set_should_concurrent_collect(false);
2018 
2019         stats().record_cms_end();
2020         // Don't move the concurrent_phases_end() and compute_new_size()
2021         // calls to here because a preempted background collection
        // has its state set to "Resetting".
2023         break;
2024       case Idling:
2025       default:
2026         ShouldNotReachHere();
2027         break;
2028     }
2029     if (TraceCMSState) {
2030       gclog_or_tty->print_cr("  Thread " INTPTR_FORMAT " done - next CMS state %d",
2031         Thread::current(), _collectorState);
2032     }
2033     assert(_foregroundGCShouldWait, "block post-condition");
2034   }
2035 
2036   // Should this be in gc_epilogue?
2037   collector_policy()->counters()->update_counters();
2038 
2039   {
2040     // Clear _foregroundGCShouldWait and, in the event that the
2041     // foreground collector is waiting, notify it, before
2042     // returning.
2043     MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2044     _foregroundGCShouldWait = false;
2045     if (_foregroundGCIsActive) {
2046       CGC_lock->notify();
2047     }
2048     assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
2049            "Possible deadlock");
2050   }
2051   if (TraceCMSState) {
2052     gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
2053       " exiting collection CMS state %d",
2054       Thread::current(), _collectorState);
2055   }
2056   if (PrintGC && Verbose) {
2057     _cmsGen->print_heap_change(prev_used);
2058   }
2059 }
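
// For reference: a background cycle that is never preempted walks the
// collector states in this order, driven by the switch above:
//
//   Idling -> InitialMarking -> Marking -> Precleaning
//     [-> AbortablePreclean] -> FinalMarking -> Sweeping
//     -> Resizing -> Resetting -> Idling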
2060 
2061 void CMSCollector::register_gc_start(GCCause::Cause cause) {
2062   _cms_start_registered = true;
2063   _gc_timer_cm->register_gc_start();
2064   _gc_tracer_cm->report_gc_start(cause, _gc_timer_cm->gc_start());
2065 }
2066 
2067 void CMSCollector::register_gc_end() {
2068   if (_cms_start_registered) {
2069     report_heap_summary(GCWhen::AfterGC);
2070 
2071     _gc_timer_cm->register_gc_end();
2072     _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
2073     _cms_start_registered = false;
2074   }
2075 }
2076 
2077 void CMSCollector::save_heap_summary() {
2078   GenCollectedHeap* gch = GenCollectedHeap::heap();
2079   _last_heap_summary = gch->create_heap_summary();
2080   _last_metaspace_summary = gch->create_metaspace_summary();
2081 }
2082 
2083 void CMSCollector::report_heap_summary(GCWhen::Type when) {
2084   _gc_tracer_cm->report_gc_heap_summary(when, _last_heap_summary);
2085   _gc_tracer_cm->report_metaspace_summary(when, _last_metaspace_summary);
2086 }
2087 
2088 bool CMSCollector::waitForForegroundGC() {
2089   bool res = false;
2090   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
2091          "CMS thread should have CMS token");
2092   // Block the foreground collector until the
  // background collector decides whether to
2094   // yield.
2095   MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2096   _foregroundGCShouldWait = true;
2097   if (_foregroundGCIsActive) {
2098     // The background collector yields to the
2099     // foreground collector and returns a value
2100     // indicating that it has yielded.  The foreground
2101     // collector can proceed.
2102     res = true;
2103     _foregroundGCShouldWait = false;
2104     ConcurrentMarkSweepThread::clear_CMS_flag(
2105       ConcurrentMarkSweepThread::CMS_cms_has_token);
2106     ConcurrentMarkSweepThread::set_CMS_flag(
2107       ConcurrentMarkSweepThread::CMS_cms_wants_token);
2108     // Get a possibly blocked foreground thread going
2109     CGC_lock->notify();
2110     if (TraceCMSState) {
2111       gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT " waiting at CMS state %d",
2112         Thread::current(), _collectorState);
2113     }
2114     while (_foregroundGCIsActive) {
2115       CGC_lock->wait(Mutex::_no_safepoint_check_flag);
2116     }
2117     ConcurrentMarkSweepThread::set_CMS_flag(
2118       ConcurrentMarkSweepThread::CMS_cms_has_token);
2119     ConcurrentMarkSweepThread::clear_CMS_flag(
2120       ConcurrentMarkSweepThread::CMS_cms_wants_token);
2121   }
2122   if (TraceCMSState) {
2123     gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT " continuing at CMS state %d",
2124       Thread::current(), _collectorState);
2125   }
2126   return res;
2127 }
2128 
2129 // Because of the need to lock the free lists and other structures in
2130 // the collector, common to all the generations that the collector is
2131 // collecting, we need the gc_prologues of individual CMS generations
// to delegate to their collector. It may have been simpler had the
2133 // current infrastructure allowed one to call a prologue on a
2134 // collector. In the absence of that we have the generation's
2135 // prologue delegate to the collector, which delegates back
2136 // some "local" work to a worker method in the individual generations
2137 // that it's responsible for collecting, while itself doing any
2138 // work common to all generations it's responsible for. A similar
// comment applies to the gc_epilogue()s.
2140 // The role of the variable _between_prologue_and_epilogue is to
2141 // enforce the invocation protocol.
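//
// The protocol reduces to a reentrancy guard around the real work; in
// outline (illustrative; see the actual methods below):
//
//   void CMSCollector::gc_prologue(bool full) {
//     if (_between_prologue_and_epilogue) return;  // delegated duplicate
//     _between_prologue_and_epilogue = true;
//     ... claim locks, per-generation gc_prologue_work() ...
//   }
//   void CMSCollector::gc_epilogue(bool full) {
//     if (!_between_prologue_and_epilogue) return; // delegated duplicate
//     ... per-generation gc_epilogue_work(), release locks ...
//     _between_prologue_and_epilogue = false;
//   }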
2142 void CMSCollector::gc_prologue(bool full) {
2143   // Call gc_prologue_work() for the CMSGen
2144   // we are responsible for.
2145 
2146   // The following locking discipline assumes that we are only called
2147   // when the world is stopped.
2148   assert(SafepointSynchronize::is_at_safepoint(), "world is stopped assumption");
2149 
2150   // The CMSCollector prologue must call the gc_prologues for the
  // "generations" that it's responsible for.
2153 
2154   assert(   Thread::current()->is_VM_thread()
2155          || (   CMSScavengeBeforeRemark
2156              && Thread::current()->is_ConcurrentGC_thread()),
2157          "Incorrect thread type for prologue execution");
2158 
2159   if (_between_prologue_and_epilogue) {
2160     // We have already been invoked; this is a gc_prologue delegation
2161     // from yet another CMS generation that we are responsible for, just
2162     // ignore it since all relevant work has already been done.
2163     return;
2164   }
2165 
2166   // set a bit saying prologue has been called; cleared in epilogue
2167   _between_prologue_and_epilogue = true;
2168   // Claim locks for common data structures, then call gc_prologue_work()
2169   // for each CMSGen.
2170 
2171   getFreelistLocks();   // gets free list locks on constituent spaces
2172   bitMapLock()->lock_without_safepoint_check();
2173 
2174   // Should call gc_prologue_work() for all cms gens we are responsible for
2175   bool duringMarking =    _collectorState >= Marking
2176                          && _collectorState < Sweeping;
2177 
2178   // The young collections clear the modified oops state, which tells if
2179   // there are any modified oops in the class. The remark phase also needs
2180   // that information. Tell the young collection to save the union of all
2181   // modified klasses.
2182   if (duringMarking) {
2183     _ct->klass_rem_set()->set_accumulate_modified_oops(true);
2184   }
2185 
2186   bool registerClosure = duringMarking;
2187 
2188   ModUnionClosure* muc = CollectedHeap::use_parallel_gc_threads() ?
2189                                                &_modUnionClosurePar
2190                                                : &_modUnionClosure;
2191   _cmsGen->gc_prologue_work(full, registerClosure, muc);
2192 
2193   if (!full) {
2194     stats().record_gc0_begin();
2195   }
2196 }
2197 
2198 void ConcurrentMarkSweepGeneration::gc_prologue(bool full) {
2199 
2200   _capacity_at_prologue = capacity();
2201   _used_at_prologue = used();
2202 
2203   // Delegate to CMScollector which knows how to coordinate between
2204   // this and any other CMS generations that it is responsible for
2205   // collecting.
2206   collector()->gc_prologue(full);
2207 }
2208 
2209 // This is a "private" interface for use by this generation's CMSCollector.
2210 // Not to be called directly by any other entity (for instance,
2211 // GenCollectedHeap, which calls the "public" gc_prologue method above).
2212 void ConcurrentMarkSweepGeneration::gc_prologue_work(bool full,
2213   bool registerClosure, ModUnionClosure* modUnionClosure) {
2214   assert(!incremental_collection_failed(), "Shouldn't be set yet");
2215   assert(cmsSpace()->preconsumptionDirtyCardClosure() == NULL,
2216     "Should be NULL");
2217   if (registerClosure) {
2218     cmsSpace()->setPreconsumptionDirtyCardClosure(modUnionClosure);
2219   }
2220   cmsSpace()->gc_prologue();
2221   // Clear stat counters
2222   NOT_PRODUCT(
2223     assert(_numObjectsPromoted == 0, "check");
2224     assert(_numWordsPromoted   == 0, "check");
2225     if (Verbose && PrintGC) {
      gclog_or_tty->print("Allocated " SIZE_FORMAT " objects, "
                          SIZE_FORMAT " bytes concurrently",
                          _numObjectsAllocated, _numWordsAllocated*sizeof(HeapWord));
2229     }
2230     _numObjectsAllocated = 0;
2231     _numWordsAllocated   = 0;
2232   )
2233 }
2234 
2235 void CMSCollector::gc_epilogue(bool full) {
2236   // The following locking discipline assumes that we are only called
2237   // when the world is stopped.
2238   assert(SafepointSynchronize::is_at_safepoint(),
2239          "world is stopped assumption");
2240 
2241   // Currently the CMS epilogue (see CompactibleFreeListSpace) merely checks
  // if linear allocation blocks need to be appropriately marked to allow
  // the blocks to be parsable. We also check here whether we need to nudge the
2244   // CMS collector thread to start a new cycle (if it's not already active).
2245   assert(   Thread::current()->is_VM_thread()
2246          || (   CMSScavengeBeforeRemark
2247              && Thread::current()->is_ConcurrentGC_thread()),
2248          "Incorrect thread type for epilogue execution");
2249 
2250   if (!_between_prologue_and_epilogue) {
2251     // We have already been invoked; this is a gc_epilogue delegation
2252     // from yet another CMS generation that we are responsible for, just
2253     // ignore it since all relevant work has already been done.
2254     return;
2255   }
2256   assert(haveFreelistLocks(), "must have freelist locks");
2257   assert_lock_strong(bitMapLock());
2258 
2259   _ct->klass_rem_set()->set_accumulate_modified_oops(false);
2260 
2261   _cmsGen->gc_epilogue_work(full);
2262 
2263   if (_collectorState == AbortablePreclean || _collectorState == Precleaning) {
2264     // in case sampling was not already enabled, enable it
2265     _start_sampling = true;
2266   }
2267   // reset _eden_chunk_array so sampling starts afresh
2268   _eden_chunk_index = 0;
2269 
2270   size_t cms_used   = _cmsGen->cmsSpace()->used();
2271 
2272   // update performance counters - this uses a special version of
2273   // update_counters() that allows the utilization to be passed as a
2274   // parameter, avoiding multiple calls to used().
2275   //
2276   _cmsGen->update_counters(cms_used);
2277 
2278   bitMapLock()->unlock();
2279   releaseFreelistLocks();
2280 
2281   if (!CleanChunkPoolAsync) {
2282     Chunk::clean_chunk_pool();
2283   }
2284 
2285   set_did_compact(false);
2286   _between_prologue_and_epilogue = false;  // ready for next cycle
2287 }
2288 
2289 void ConcurrentMarkSweepGeneration::gc_epilogue(bool full) {
2290   collector()->gc_epilogue(full);
2291 
2292   // Also reset promotion tracking in par gc thread states.
2293   for (uint i = 0; i < ParallelGCThreads; i++) {
2294     _par_gc_thread_states[i]->promo.stopTrackingPromotions(i);
2295   }
2296 }
2297 
2298 void ConcurrentMarkSweepGeneration::gc_epilogue_work(bool full) {
2299   assert(!incremental_collection_failed(), "Should have been cleared");
2300   cmsSpace()->setPreconsumptionDirtyCardClosure(NULL);
2301   cmsSpace()->gc_epilogue();
  // Print stat counters
2303   NOT_PRODUCT(
2304     assert(_numObjectsAllocated == 0, "check");
2305     assert(_numWordsAllocated == 0, "check");
2306     if (Verbose && PrintGC) {
      gclog_or_tty->print("Promoted " SIZE_FORMAT " objects, "
                          SIZE_FORMAT " bytes",
                          _numObjectsPromoted, _numWordsPromoted*sizeof(HeapWord));
2310     }
2311     _numObjectsPromoted = 0;
2312     _numWordsPromoted   = 0;
2313   )
2314 
2315   if (PrintGC && Verbose) {
    // The call down the chain in contiguous_available() needs the
    // freelistLock, so print this out before releasing the freelistLock.
    gclog_or_tty->print(" Contiguous available " SIZE_FORMAT " bytes ",
2319                         contiguous_available());
2320   }
2321 }
2322 
2323 #ifndef PRODUCT
2324 bool CMSCollector::have_cms_token() {
2325   Thread* thr = Thread::current();
2326   if (thr->is_VM_thread()) {
2327     return ConcurrentMarkSweepThread::vm_thread_has_cms_token();
2328   } else if (thr->is_ConcurrentGC_thread()) {
2329     return ConcurrentMarkSweepThread::cms_thread_has_cms_token();
2330   } else if (thr->is_GC_task_thread()) {
2331     return ConcurrentMarkSweepThread::vm_thread_has_cms_token() &&
2332            ParGCRareEvent_lock->owned_by_self();
2333   }
2334   return false;
2335 }
2336 #endif
2337 
2338 // Check reachability of the given heap address in CMS generation,
2339 // treating all other generations as roots.
2340 bool CMSCollector::is_cms_reachable(HeapWord* addr) {
2341   // We could "guarantee" below, rather than assert, but I'll
2342   // leave these as "asserts" so that an adventurous debugger
2343   // could try this in the product build provided some subset of
2344   // the conditions were met, provided they were interested in the
2345   // results and knew that the computation below wouldn't interfere
2346   // with other concurrent computations mutating the structures
2347   // being read or written.
2348   assert(SafepointSynchronize::is_at_safepoint(),
2349          "Else mutations in object graph will make answer suspect");
2350   assert(have_cms_token(), "Should hold cms token");
2351   assert(haveFreelistLocks(), "must hold free list locks");
2352   assert_lock_strong(bitMapLock());
2353 
2354   // Clear the marking bit map array before starting, but, just
2355   // for kicks, first report if the given address is already marked
2356   gclog_or_tty->print_cr("Start: Address " PTR_FORMAT " is%s marked", addr,
2357                 _markBitMap.isMarked(addr) ? "" : " not");
2358 
2359   if (verify_after_remark()) {
2360     MutexLockerEx x(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
2361     bool result = verification_mark_bm()->isMarked(addr);
2362     gclog_or_tty->print_cr("TransitiveMark: Address " PTR_FORMAT " %s marked", addr,
2363                            result ? "IS" : "is NOT");
2364     return result;
2365   } else {
2366     gclog_or_tty->print_cr("Could not compute result");
2367     return false;
2368   }
2369 }
2370 
2371 
2372 void
2373 CMSCollector::print_on_error(outputStream* st) {
2374   CMSCollector* collector = ConcurrentMarkSweepGeneration::_collector;
2375   if (collector != NULL) {
2376     CMSBitMap* bitmap = &collector->_markBitMap;
2377     st->print_cr("Marking Bits: (CMSBitMap*) " PTR_FORMAT, bitmap);
2378     bitmap->print_on_error(st, " Bits: ");
2379 
2380     st->cr();
2381 
2382     CMSBitMap* mut_bitmap = &collector->_modUnionTable;
2383     st->print_cr("Mod Union Table: (CMSBitMap*) " PTR_FORMAT, mut_bitmap);
2384     mut_bitmap->print_on_error(st, " Bits: ");
2385   }
2386 }
2387 
2388 ////////////////////////////////////////////////////////
2389 // CMS Verification Support
2390 ////////////////////////////////////////////////////////
2391 // Following the remark phase, the following invariant
2392 // should hold -- each object in the CMS heap which is
2393 // marked in markBitMap() should be marked in the verification_mark_bm().
2394 
2395 class VerifyMarkedClosure: public BitMapClosure {
2396   CMSBitMap* _marks;
2397   bool       _failed;
2398 
2399  public:
2400   VerifyMarkedClosure(CMSBitMap* bm): _marks(bm), _failed(false) {}
2401 
2402   bool do_bit(size_t offset) {
2403     HeapWord* addr = _marks->offsetToHeapWord(offset);
2404     if (!_marks->isMarked(addr)) {
2405       oop(addr)->print_on(gclog_or_tty);
      gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)", addr);
2407       _failed = true;
2408     }
2409     return true;
2410   }
2411 
2412   bool failed() { return _failed; }
2413 };
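
// VerifyMarkedClosure is applied by iterating over the verification bit
// map once marking is complete (see verify_after_remark_work_1/2 below):
//
//   VerifyMarkedClosure vcl(markBitMap());
//   verification_mark_bm()->iterate(&vcl);
//   if (vcl.failed()) { ... report the offending objects and fail ... }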
2414 
2415 bool CMSCollector::verify_after_remark(bool silent) {
2416   if (!silent) gclog_or_tty->print(" [Verifying CMS Marking... ");
2417   MutexLockerEx ml(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
2418   static bool init = false;
2419 
2420   assert(SafepointSynchronize::is_at_safepoint(),
2421          "Else mutations in object graph will make answer suspect");
2422   assert(have_cms_token(),
         "Else there may be mutual interference in use of "
         "verification data structures");
2425   assert(_collectorState > Marking && _collectorState <= Sweeping,
2426          "Else marking info checked here may be obsolete");
2427   assert(haveFreelistLocks(), "must hold free list locks");
2428   assert_lock_strong(bitMapLock());
2429 
2430 
2431   // Allocate marking bit map if not already allocated
2432   if (!init) { // first time
2433     if (!verification_mark_bm()->allocate(_span)) {
2434       return false;
2435     }
2436     init = true;
2437   }
2438 
2439   assert(verification_mark_stack()->isEmpty(), "Should be empty");
2440 
2441   // Turn off refs discovery -- so we will be tracing through refs.
2442   // This is as intended, because by this time
2443   // GC must already have cleared any refs that need to be cleared,
2444   // and traced those that need to be marked; moreover,
2445   // the marking done here is not going to interfere in any
2446   // way with the marking information used by GC.
2447   NoRefDiscovery no_discovery(ref_processor());
2448 
2449   COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
2450 
2451   // Clear any marks from a previous round
2452   verification_mark_bm()->clear_all();
2453   assert(verification_mark_stack()->isEmpty(), "markStack should be empty");
2454   verify_work_stacks_empty();
2455 
2456   GenCollectedHeap* gch = GenCollectedHeap::heap();
2457   gch->ensure_parsability(false);  // fill TLABs, but no need to retire them
2458   // Update the saved marks which may affect the root scans.
2459   gch->save_marks();
2460 
2461   if (CMSRemarkVerifyVariant == 1) {
2462     // In this first variant of verification, we complete
2463     // all marking, then check if the new marks-vector is
2464     // a subset of the CMS marks-vector.
2465     verify_after_remark_work_1();
2466   } else if (CMSRemarkVerifyVariant == 2) {
2467     // In this second variant of verification, we flag an error
2468     // (i.e. an object reachable in the new marks-vector not reachable
2469     // in the CMS marks-vector) immediately, also indicating the
    // identity of an object (A) that references the unmarked object (B) --
2471     // presumably, a mutation to A failed to be picked up by preclean/remark?
2472     verify_after_remark_work_2();
2473   } else {
2474     warning("Unrecognized value %d for CMSRemarkVerifyVariant",
2475             CMSRemarkVerifyVariant);
2476   }
2477   if (!silent) gclog_or_tty->print(" done] ");
2478   return true;
2479 }
2480 
2481 void CMSCollector::verify_after_remark_work_1() {
2482   ResourceMark rm;
2483   HandleMark  hm;
2484   GenCollectedHeap* gch = GenCollectedHeap::heap();
2485 
2486   // Get a clear set of claim bits for the roots processing to work with.
2487   ClassLoaderDataGraph::clear_claimed_marks();
2488 
2489   // Mark from roots one level into CMS
2490   MarkRefsIntoClosure notOlder(_span, verification_mark_bm());
2491   gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
2492 
2493   gch->gen_process_roots(_cmsGen->level(),
2494                          true,   // younger gens are roots
2495                          true,   // activate StrongRootsScope
2496                          SharedHeap::ScanningOption(roots_scanning_options()),
2497                          should_unload_classes(),
2498                          &notOlder,
2499                          NULL,
2500                          NULL);  // SSS: Provide correct closure
2501 
2502   // Now mark from the roots
2503   MarkFromRootsClosure markFromRootsClosure(this, _span,
2504     verification_mark_bm(), verification_mark_stack(),
2505     false /* don't yield */, true /* verifying */);
2506   assert(_restart_addr == NULL, "Expected pre-condition");
2507   verification_mark_bm()->iterate(&markFromRootsClosure);
2508   while (_restart_addr != NULL) {
    // Deal with stack overflow by restarting at the indicated
    // address.
2511     HeapWord* ra = _restart_addr;
2512     markFromRootsClosure.reset(ra);
2513     _restart_addr = NULL;
2514     verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
2515   }
2516   assert(verification_mark_stack()->isEmpty(), "Should have been drained");
2517   verify_work_stacks_empty();
2518 
2519   // Marking completed -- now verify that each bit marked in
2520   // verification_mark_bm() is also marked in markBitMap(); flag all
2521   // errors by printing corresponding objects.
2522   VerifyMarkedClosure vcl(markBitMap());
2523   verification_mark_bm()->iterate(&vcl);
2524   if (vcl.failed()) {
2525     gclog_or_tty->print("Verification failed");
2526     Universe::heap()->print_on(gclog_or_tty);
2527     fatal("CMS: failed marking verification after remark");
2528   }
2529 }
2530 
2531 class VerifyKlassOopsKlassClosure : public KlassClosure {
2532   class VerifyKlassOopsClosure : public OopClosure {
2533     CMSBitMap* _bitmap;
2534    public:
2535     VerifyKlassOopsClosure(CMSBitMap* bitmap) : _bitmap(bitmap) { }
2536     void do_oop(oop* p)       { guarantee(*p == NULL || _bitmap->isMarked((HeapWord*) *p), "Should be marked"); }
2537     void do_oop(narrowOop* p) { ShouldNotReachHere(); }
2538   } _oop_closure;
2539  public:
2540   VerifyKlassOopsKlassClosure(CMSBitMap* bitmap) : _oop_closure(bitmap) {}
2541   void do_klass(Klass* k) {
2542     k->oops_do(&_oop_closure);
2543   }
2544 };
2545 
2546 void CMSCollector::verify_after_remark_work_2() {
2547   ResourceMark rm;
2548   HandleMark  hm;
2549   GenCollectedHeap* gch = GenCollectedHeap::heap();
2550 
2551   // Get a clear set of claim bits for the roots processing to work with.
2552   ClassLoaderDataGraph::clear_claimed_marks();
2553 
2554   // Mark from roots one level into CMS
2555   MarkRefsIntoVerifyClosure notOlder(_span, verification_mark_bm(),
2556                                      markBitMap());
2557   CLDToOopClosure cld_closure(&notOlder, true);
2558 
2559   gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
2560 
2561   gch->gen_process_roots(_cmsGen->level(),
2562                          true,   // younger gens are roots
2563                          true,   // activate StrongRootsScope
2564                          SharedHeap::ScanningOption(roots_scanning_options()),
2565                          should_unload_classes(),
2566                          &notOlder,
2567                          NULL,
2568                          &cld_closure);
2569 
2570   // Now mark from the roots
2571   MarkFromRootsVerifyClosure markFromRootsClosure(this, _span,
2572     verification_mark_bm(), markBitMap(), verification_mark_stack());
2573   assert(_restart_addr == NULL, "Expected pre-condition");
2574   verification_mark_bm()->iterate(&markFromRootsClosure);
2575   while (_restart_addr != NULL) {
2576     // Deal with stack overflow by restarting at the indicated
2577     // address.
2578     HeapWord* ra = _restart_addr;
2579     markFromRootsClosure.reset(ra);
2580     _restart_addr = NULL;
2581     verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
2582   }
2583   assert(verification_mark_stack()->isEmpty(), "Should have been drained");
2584   verify_work_stacks_empty();
2585 
2586   VerifyKlassOopsKlassClosure verify_klass_oops(verification_mark_bm());
2587   ClassLoaderDataGraph::classes_do(&verify_klass_oops);
2588 
2589   // Marking completed -- now verify that each bit marked in
2590   // verification_mark_bm() is also marked in markBitMap(); flag all
2591   // errors by printing corresponding objects.
2592   VerifyMarkedClosure vcl(markBitMap());
2593   verification_mark_bm()->iterate(&vcl);
2594   assert(!vcl.failed(), "Else verification above should not have succeeded");
2595 }
2596 
2597 void ConcurrentMarkSweepGeneration::save_marks() {
2598   // delegate to CMS space
2599   cmsSpace()->save_marks();
2600   for (uint i = 0; i < ParallelGCThreads; i++) {
2601     _par_gc_thread_states[i]->promo.startTrackingPromotions();
2602   }
2603 }
2604 
2605 bool ConcurrentMarkSweepGeneration::no_allocs_since_save_marks() {
2606   return cmsSpace()->no_allocs_since_save_marks();
2607 }
2608 
2609 #define CMS_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)    \
2610                                                                 \
2611 void ConcurrentMarkSweepGeneration::                            \
2612 oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) {   \
2613   cl->set_generation(this);                                     \
2614   cmsSpace()->oop_since_save_marks_iterate##nv_suffix(cl);      \
2615   cl->reset_generation();                                       \
2616   save_marks();                                                 \
2617 }
2618 
2619 ALL_SINCE_SAVE_MARKS_CLOSURES(CMS_SINCE_SAVE_MARKS_DEFN)
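// For reference, with a hypothetical closure type FooClosure and the
// suffix "_nv", the macro application above stamps out approximately:
//
//   void ConcurrentMarkSweepGeneration::
//   oop_since_save_marks_iterate_nv(FooClosure* cl) {
//     cl->set_generation(this);
//     cmsSpace()->oop_since_save_marks_iterate_nv(cl);
//     cl->reset_generation();
//     save_marks();
//   }
//
// i.e. one such method per (closure type, suffix) pair enumerated by
// ALL_SINCE_SAVE_MARKS_CLOSURES.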
2620 
2621 void
2622 ConcurrentMarkSweepGeneration::oop_iterate(ExtendedOopClosure* cl) {
2623   if (freelistLock()->owned_by_self()) {
2624     Generation::oop_iterate(cl);
2625   } else {
2626     MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
2627     Generation::oop_iterate(cl);
2628   }
2629 }
2630 
2631 void
2632 ConcurrentMarkSweepGeneration::object_iterate(ObjectClosure* cl) {
2633   if (freelistLock()->owned_by_self()) {
2634     Generation::object_iterate(cl);
2635   } else {
2636     MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
2637     Generation::object_iterate(cl);
2638   }
2639 }
2640 
2641 void
2642 ConcurrentMarkSweepGeneration::safe_object_iterate(ObjectClosure* cl) {
2643   if (freelistLock()->owned_by_self()) {
2644     Generation::safe_object_iterate(cl);
2645   } else {
2646     MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
2647     Generation::safe_object_iterate(cl);
2648   }
2649 }
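// The three iterators above share a "lock unless already held" idiom. A
// standalone sketch of the pattern, with a hypothetical SelfAwareLock
// standing in for HotSpot's Mutex and its owned_by_self() query (the real
// Mutex maintains its owner field under proper synchronization):
//
#if 0  // Illustrative sketch only; not compiled into the collector.
#include <mutex>
#include <thread>

struct SelfAwareLock {
  std::mutex      mu;
  std::thread::id owner;
  bool owned_by_self() const { return owner == std::this_thread::get_id(); }
  void lock()   { mu.lock(); owner = std::this_thread::get_id(); }
  void unlock() { owner = std::thread::id(); mu.unlock(); }
};

template <typename Action>
void run_locked(SelfAwareLock& l, Action action) {
  if (l.owned_by_self()) {
    action();     // lock already taken higher up the call chain
  } else {
    l.lock();     // acquire just for the duration of the action
    action();
    l.unlock();
  }
}
#endif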
2650 
2651 void
2652 ConcurrentMarkSweepGeneration::post_compact() {
2653 }
2654 
2655 void
2656 ConcurrentMarkSweepGeneration::prepare_for_verify() {
2657   // Fix the linear allocation blocks to look like free blocks.
2658 
2659   // Locks are normally acquired/released in gc_prologue/gc_epilogue, but those
2660   // are not called when the heap is verified during universe initialization and
2661   // at vm shutdown.
2662   if (freelistLock()->owned_by_self()) {
2663     cmsSpace()->prepare_for_verify();
2664   } else {
2665     MutexLockerEx fll(freelistLock(), Mutex::_no_safepoint_check_flag);
2666     cmsSpace()->prepare_for_verify();
2667   }
2668 }
2669 
2670 void
2671 ConcurrentMarkSweepGeneration::verify() {
2672   // Locks are normally acquired/released in gc_prologue/gc_epilogue, but those
2673   // are not called when the heap is verified during universe initialization and
2674   // at vm shutdown.
2675   if (freelistLock()->owned_by_self()) {
2676     cmsSpace()->verify();
2677   } else {
2678     MutexLockerEx fll(freelistLock(), Mutex::_no_safepoint_check_flag);
2679     cmsSpace()->verify();
2680   }
2681 }
2682 
2683 void CMSCollector::verify() {
2684   _cmsGen->verify();
2685 }
2686 
2687 #ifndef PRODUCT
2688 bool CMSCollector::overflow_list_is_empty() const {
2689   assert(_num_par_pushes >= 0, "Inconsistency");
2690   if (_overflow_list == NULL) {
2691     assert(_num_par_pushes == 0, "Inconsistency");
2692   }
2693   return _overflow_list == NULL;
2694 }
2695 
2696 // The methods verify_work_stacks_empty() and verify_overflow_empty()
2697 // merely consolidate assertion checks that appear to occur together frequently.
2698 void CMSCollector::verify_work_stacks_empty() const {
2699   assert(_markStack.isEmpty(), "Marking stack should be empty");
2700   assert(overflow_list_is_empty(), "Overflow list should be empty");
2701 }
2702 
2703 void CMSCollector::verify_overflow_empty() const {
2704   assert(overflow_list_is_empty(), "Overflow list should be empty");
2705   assert(no_preserved_marks(), "No preserved marks");
2706 }
2707 #endif // PRODUCT
2708 
2709 // Decide if we want to enable class unloading as part of the
2710 // ensuing concurrent GC cycle. We will collect and
2711 // unload classes if it's the case that:
2712 // (1) an explicit gc request has been made and the flag
2713 //     ExplicitGCInvokesConcurrentAndUnloadsClasses is set, OR
2714 // (2) (a) class unloading is enabled at the command line, and
2715 //     (b) (i) too many cycles have run since classes were last unloaded, or (ii) old gen is getting really full
2716 // NOTE: Provided there is no change in the state of the heap between
2717 // calls to this method, it should have idempotent results. Moreover,
2718 // its results should be monotonically increasing (i.e. going from 0 to 1,
2719 // but not 1 to 0) between successive calls between which the heap was
2720 // not collected. The implementation below therefore relies on the
2721 // property that concurrent_cycles_since_last_unload()
2722 // will not decrease unless a collection cycle happened, and that
2723 // _cmsGen->is_too_full() is
2724 // itself also monotonic in that sense between two such
2725 // successive calls.
2726 void CMSCollector::update_should_unload_classes() {
2727   _should_unload_classes = false;
2728   // Condition 1 above
2729   if (_full_gc_requested && ExplicitGCInvokesConcurrentAndUnloadsClasses) {
2730     _should_unload_classes = true;
2731   } else if (CMSClassUnloadingEnabled) { // Condition 2.a above
2732     // Disjuncts 2.b.(i,ii) above
2733     _should_unload_classes = (concurrent_cycles_since_last_unload() >=
2734                               CMSClassUnloadingMaxInterval)
2735                            || _cmsGen->is_too_full();
2736   }
2737 }
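// Worked example of the decision above (parameter values illustrative):
// with CMSClassUnloadingEnabled set, CMSClassUnloadingMaxInterval == 5,
// and no explicit unloading request pending, a cycle unloads classes iff
//   concurrent_cycles_since_last_unload() >= 5  ||  _cmsGen->is_too_full()
// so 6 cycles since the last unload triggers unloading, while 3 cycles
// with a not-too-full old gen does not.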
2738 
2739 bool ConcurrentMarkSweepGeneration::is_too_full() const {
2740   bool res = should_concurrent_collect();
2741   res = res && (occupancy() > (double)CMSIsTooFullPercentage/100.0);
2742   return res;
2743 }
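// E.g. with CMSIsTooFullPercentage at its default of 98, a generation at
// 99% occupancy that also satisfies should_concurrent_collect() is deemed
// too full, since 0.99 > 98/100.0.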
2744 
2745 void CMSCollector::setup_cms_unloading_and_verification_state() {
2746   const  bool should_verify =   VerifyBeforeGC || VerifyAfterGC || VerifyDuringGC
2747                              || VerifyBeforeExit;
2748   const  int  rso           =   SharedHeap::SO_AllCodeCache;
2749 
2750   // We set the proper root for this CMS cycle here.
2751   if (should_unload_classes()) {   // Should unload classes this cycle
2752     remove_root_scanning_option(rso);  // Shrink the root set appropriately
2753     set_verifying(should_verify);    // Set verification state for this cycle
2754     return;                            // Nothing else needs to be done at this time
2755   }
2756 
2757   // Not unloading classes this cycle
2758   assert(!should_unload_classes(), "Inconsistency!");
2759 
2760   if ((!verifying() || unloaded_classes_last_cycle()) && should_verify) {
2761     // Include symbols, strings and code cache elements to prevent their resurrection.
2762     add_root_scanning_option(rso);
2763     set_verifying(true);
2764   } else if (verifying() && !should_verify) {
2765     // We were verifying, but some verification flags got disabled.
2766     set_verifying(false);
2767     // Exclude symbols, strings and code cache elements from root scanning to
2768     // reduce initial mark (IM) and remark (RM) pauses.
2769     remove_root_scanning_option(rso);
2770   }
2771 }
2772 
2773 
2774 #ifndef PRODUCT
2775 HeapWord* CMSCollector::block_start(const void* p) const {
2776   const HeapWord* addr = (HeapWord*)p;
2777   if (_span.contains(p)) {
2778     if (_cmsGen->cmsSpace()->is_in_reserved(addr)) {
2779       return _cmsGen->cmsSpace()->block_start(p);
2780     }
2781   }
2782   return NULL;
2783 }
2784 #endif
2785 
2786 HeapWord*
2787 ConcurrentMarkSweepGeneration::expand_and_allocate(size_t word_size,
2788                                                    bool   tlab,
2789                                                    bool   parallel) {
2790   CMSSynchronousYieldRequest yr;
2791   assert(!tlab, "Can't deal with TLAB allocation");
2792   MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
2793   expand_for_gc_cause(word_size*HeapWordSize, MinHeapDeltaBytes, CMSExpansionCause::_satisfy_allocation);
2794   if (GCExpandToAllocateDelayMillis > 0) {
2795     os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
2796   }
2797   return have_lock_and_allocate(word_size, tlab);
2798 }
2799 
2800 void ConcurrentMarkSweepGeneration::expand_for_gc_cause(
2801     size_t bytes,
2802     size_t expand_bytes,
2803     CMSExpansionCause::Cause cause)
2804 {
2805 
2806   bool success = expand(bytes, expand_bytes);
2807 
2808   // remember why we expanded; this information is used
2809   // by should_concurrent_collect() when making decisions on whether to start
2810   // a new CMS cycle.
2811   if (success) {
2812     set_expansion_cause(cause);
2813     if (PrintGCDetails && Verbose) {
2814       gclog_or_tty->print_cr("Expanded CMS gen for %s",
2815         CMSExpansionCause::to_string(cause));
2816     }
2817   }
2818 }
2819 
2820 HeapWord* ConcurrentMarkSweepGeneration::expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz) {
2821   HeapWord* res = NULL;
2822   MutexLocker x(ParGCRareEvent_lock);
2823   while (true) {
2824     // Expansion by some other thread might make alloc OK now:
2825     res = ps->lab.alloc(word_sz);
2826     if (res != NULL) return res;
2827     // If there's not enough expansion space available, give up.
2828     if (_virtual_space.uncommitted_size() < (word_sz * HeapWordSize)) {
2829       return NULL;
2830     }
2831     // Otherwise, we try expansion.
2832     expand_for_gc_cause(word_sz*HeapWordSize, MinHeapDeltaBytes, CMSExpansionCause::_allocate_par_lab);
2833     // Now go around the loop and try alloc again;
2834     // A competing par_promote might beat us to the expansion space,
2835     // so we may go around the loop again if promotion fails again.
2836     if (GCExpandToAllocateDelayMillis > 0) {
2837       os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
2838     }
2839   }
2840 }
2841 
2842 
2843 bool ConcurrentMarkSweepGeneration::expand_and_ensure_spooling_space(
2844   PromotionInfo* promo) {
2845   MutexLocker x(ParGCRareEvent_lock);
2846   size_t refill_size_bytes = promo->refillSize() * HeapWordSize;
2847   while (true) {
2848     // Expansion by some other thread might make alloc OK now:
2849     if (promo->ensure_spooling_space()) {
2850       assert(promo->has_spooling_space(),
2851              "Post-condition of successful ensure_spooling_space()");
2852       return true;
2853     }
2854     // If there's not enough expansion space available, give up.
2855     if (_virtual_space.uncommitted_size() < refill_size_bytes) {
2856       return false;
2857     }
2858     // Otherwise, we try expansion.
2859     expand_for_gc_cause(refill_size_bytes, MinHeapDeltaBytes, CMSExpansionCause::_allocate_par_spooling_space);
2860     // Now go around the loop and try alloc again;
2861     // A competing allocation might beat us to the expansion space,
2862     // so we may go around the loop again if allocation fails again.
2863     if (GCExpandToAllocateDelayMillis > 0) {
2864       os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
2865     }
2866   }
2867 }
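// Both methods above follow the same "allocate, else expand, then retry"
// protocol under ParGCRareEvent_lock. A standalone sketch of that loop,
// where try_alloc(), can_expand() and expand() are hypothetical stand-ins
// for the lab/spooling-space and virtual-space operations:
//
#if 0  // Illustrative sketch only; not compiled into the collector.
#include <cstddef>

void* try_alloc(size_t size);   // returns nullptr on failure
bool  can_expand(size_t size);  // is enough uncommitted space left?
void  expand(size_t size);      // grow the space

void* alloc_with_expansion_retry(size_t size) {
  for (;;) {
    // Expansion by some other thread may have made the allocation
    // possible since our last attempt:
    if (void* p = try_alloc(size)) {
      return p;
    }
    if (!can_expand(size)) {
      return nullptr;           // no room left to grow: give up
    }
    expand(size);
    // Go around again; a competing thread may consume the newly
    // expanded space before we do, so the retry can still fail.
  }
}
#endif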
2868 
2869 void ConcurrentMarkSweepGeneration::shrink(size_t bytes) {
2870   // Only shrink if a compaction was done so that all the free space
2871   // in the generation is in a contiguous block at the end.
2872   if (did_compact()) {
2873     CardGeneration::shrink(bytes);
2874   }
2875 }
2876 
2877 void ConcurrentMarkSweepGeneration::assert_correct_size_change_locking() {
2878   assert_locked_or_safepoint(Heap_lock);
2879 }
2880 
2881 void ConcurrentMarkSweepGeneration::shrink_free_list_by(size_t bytes) {
2882   assert_locked_or_safepoint(Heap_lock);
2883   assert_lock_strong(freelistLock());
2884   if (PrintGCDetails && Verbose) {
2885     warning("Shrinking of CMS not yet implemented");
2886   }
2887   return;
2888 }
2889 
2890 
2891 // Simple ctor/dtor wrapper for accounting & timer chores around concurrent
2892 // phases.
2893 class CMSPhaseAccounting: public StackObj {
2894  public:
2895   CMSPhaseAccounting(CMSCollector *collector,
2896                      const char *phase,
2897                      const GCId gc_id,
2898                      bool print_cr = true);
2899   ~CMSPhaseAccounting();
2900 
2901  private:
2902   CMSCollector *_collector;
2903   const char *_phase;
2904   elapsedTimer _wallclock;
2905   bool _print_cr;
2906   const GCId _gc_id;
2907 
2908  public:
2909   // Not MT-safe, so do not pass these StackObjs around
2910   // where they may be accessed by other threads.
2911   jlong wallclock_millis() {
2912     assert(_wallclock.is_active(), "Wall clock should not stop");
2913     _wallclock.stop();  // to record time
2914     jlong ret = _wallclock.milliseconds();
2915     _wallclock.start(); // restart
2916     return ret;
2917   }
2918 };
2919 
2920 CMSPhaseAccounting::CMSPhaseAccounting(CMSCollector *collector,
2921                                        const char *phase,
2922                                        const GCId gc_id,
2923                                        bool print_cr) :
2924   _collector(collector), _phase(phase), _print_cr(print_cr), _gc_id(gc_id) {
2925 
2926   if (PrintCMSStatistics != 0) {
2927     _collector->resetYields();
2928   }
2929   if (PrintGCDetails) {
2930     gclog_or_tty->gclog_stamp(_gc_id);
2931     gclog_or_tty->print_cr("[%s-concurrent-%s-start]",
2932       _collector->cmsGen()->short_name(), _phase);
2933   }
2934   _collector->resetTimer();
2935   _wallclock.start();
2936   _collector->startTimer();
2937 }
2938 
2939 CMSPhaseAccounting::~CMSPhaseAccounting() {
2940   assert(_wallclock.is_active(), "Wall clock should not have stopped");
2941   _collector->stopTimer();
2942   _wallclock.stop();
2943   if (PrintGCDetails) {
2944     gclog_or_tty->gclog_stamp(_gc_id);
2945     gclog_or_tty->print("[%s-concurrent-%s: %3.3f/%3.3f secs]",
2946                  _collector->cmsGen()->short_name(),
2947                  _phase, _collector->timerValue(), _wallclock.seconds());
2948     if (_print_cr) {
2949       gclog_or_tty->cr();
2950     }
2951     if (PrintCMSStatistics != 0) {
2952       gclog_or_tty->print_cr(" (CMS-concurrent-%s yielded %d times)", _phase,
2953                     _collector->yields());
2954     }
2955   }
2956 }
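// Typical use, as in the concurrent phases later in this file: construct
// the accounting object on the stack at the start of a phase and let its
// destructor log the phase timing, e.g. (from markFromRoots() below)
//
//   CMSTokenSyncWithLocks ts(true, bitMapLock());
//   TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
//   CMSPhaseAccounting pa(this, "mark", _gc_tracer_cm->gc_id(), !PrintGCDetails);
//   bool res = markFromRootsWork();
//
// which brackets the phase with "[...-concurrent-mark-start]" and
// "[...-concurrent-mark: .../... secs]" log lines.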
2957 
2958 // CMS work
2959 
2960 // The common parts of CMSParInitialMarkTask and CMSParRemarkTask.
2961 class CMSParMarkTask : public AbstractGangTask {
2962  protected:
2963   CMSCollector*     _collector;
2964   int               _n_workers;
2965   CMSParMarkTask(const char* name, CMSCollector* collector, int n_workers) :
2966       AbstractGangTask(name),
2967       _collector(collector),
2968       _n_workers(n_workers) {}
2969   // Work method in support of parallel rescan ... of young gen spaces
2970   void do_young_space_rescan(uint worker_id, OopsInGenClosure* cl,
2971                              ContiguousSpace* space,
2972                              HeapWord** chunk_array, size_t chunk_top);
2973   void work_on_young_gen_roots(uint worker_id, OopsInGenClosure* cl);
2974 };
2975 
2976 // Parallel initial mark task
2977 class CMSParInitialMarkTask: public CMSParMarkTask {
2978  public:
2979   CMSParInitialMarkTask(CMSCollector* collector, int n_workers) :
2980       CMSParMarkTask("Scan roots and young gen for initial mark in parallel",
2981                      collector, n_workers) {}
2982   void work(uint worker_id);
2983 };
2984 
2985 // Checkpoint the roots into this generation from outside
2986 // this generation. [Note this initial checkpoint need only
2987 // be approximate -- we'll do a catch up phase subsequently.]
2988 void CMSCollector::checkpointRootsInitial() {
2989   assert(_collectorState == InitialMarking, "Wrong collector state");
2990   check_correct_thread_executing();
2991   TraceCMSMemoryManagerStats tms(_collectorState, GenCollectedHeap::heap()->gc_cause());
2992 
2993   save_heap_summary();
2994   report_heap_summary(GCWhen::BeforeGC);
2995 
2996   ReferenceProcessor* rp = ref_processor();
2997   SpecializationStats::clear();
2998   assert(_restart_addr == NULL, "Control point invariant");
2999   {
3000     // acquire locks for subsequent manipulations
3001     MutexLockerEx x(bitMapLock(),
3002                     Mutex::_no_safepoint_check_flag);
3003     checkpointRootsInitialWork();
3004     // enable ("weak") refs discovery
3005     rp->enable_discovery();
3006     _collectorState = Marking;
3007   }
3008   SpecializationStats::print();
3009 }
3010 
3011 void CMSCollector::checkpointRootsInitialWork() {
3012   assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped");
3013   assert(_collectorState == InitialMarking, "just checking");
3014 
3015   // If there has not been a GC[n-1] since last GC[n] cycle completed,
3016   // precede our marking with a collection of all
3017   // younger generations to keep floating garbage to a minimum.
3018   // XXX: we won't do this for now -- it's an optimization to be done later.
3019 
3020   // already have locks
3021   assert_lock_strong(bitMapLock());
3022   assert(_markBitMap.isAllClear(), "was reset at end of previous cycle");
3023 
3024   // Setup the verification and class unloading state for this
3025   // CMS collection cycle.
3026   setup_cms_unloading_and_verification_state();
3027 
3028   NOT_PRODUCT(GCTraceTime t("\ncheckpointRootsInitialWork",
3029     PrintGCDetails && Verbose, true, _gc_timer_cm, _gc_tracer_cm->gc_id());)
3030 
3031   // Reset all the PLAB chunk arrays if necessary.
3032   if (_survivor_plab_array != NULL && !CMSPLABRecordAlways) {
3033     reset_survivor_plab_arrays();
3034   }
3035 
3036   ResourceMark rm;
3037   HandleMark  hm;
3038 
3039   MarkRefsIntoClosure notOlder(_span, &_markBitMap);
3040   GenCollectedHeap* gch = GenCollectedHeap::heap();
3041 
3042   verify_work_stacks_empty();
3043   verify_overflow_empty();
3044 
3045   gch->ensure_parsability(false);  // fill TLABs, but no need to retire them
3046   // Update the saved marks which may affect the root scans.
3047   gch->save_marks();
3048 
3049   // weak reference processing has not started yet.
3050   ref_processor()->set_enqueuing_is_done(false);
3051 
3052   // Need to remember all newly created CLDs,
3053   // so that we can guarantee that the remark finds them.
3054   ClassLoaderDataGraph::remember_new_clds(true);
3055 
3056   // Whenever a CLD is found, it will be claimed before proceeding to mark
3057   // the klasses. The claimed marks need to be cleared before marking starts.
3058   ClassLoaderDataGraph::clear_claimed_marks();
3059 
3060   if (CMSPrintEdenSurvivorChunks) {
3061     print_eden_and_survivor_chunk_arrays();
3062   }
3063 
3064   {
3065     COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
3066     if (CMSParallelInitialMarkEnabled && CollectedHeap::use_parallel_gc_threads()) {
3067       // The parallel version.
3068       FlexibleWorkGang* workers = gch->workers();
3069       assert(workers != NULL, "Need parallel worker threads.");
3070       int n_workers = workers->active_workers();
3071       CMSParInitialMarkTask tsk(this, n_workers);
3072       gch->set_par_threads(n_workers);
3073       initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
3074       if (n_workers > 1) {
3075         GenCollectedHeap::StrongRootsScope srs(gch);
3076         workers->run_task(&tsk);
3077       } else {
3078         GenCollectedHeap::StrongRootsScope srs(gch);
3079         tsk.work(0);
3080       }
3081       gch->set_par_threads(0);
3082     } else {
3083       // The serial version.
3084       CLDToOopClosure cld_closure(&notOlder, true);
3085       gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
3086       gch->gen_process_roots(_cmsGen->level(),
3087                              true,   // younger gens are roots
3088                              true,   // activate StrongRootsScope
3089                              SharedHeap::ScanningOption(roots_scanning_options()),
3090                              should_unload_classes(),
3091                              &notOlder,
3092                              NULL,
3093                              &cld_closure);
3094     }
3095   }
3096 
3097   // Clear mod-union table; it will be dirtied in the prologue of
3098   // the CMS generation for each younger generation collection.
3099 
3100   assert(_modUnionTable.isAllClear(),
3101        "Was cleared in most recent final checkpoint phase"
3102        " or no bits are set in the gc_prologue before the start of the next "
3103        "subsequent marking phase.");
3104 
3105   assert(_ct->klass_rem_set()->mod_union_is_clear(), "Must be");
3106 
3107   // Save the end of the used_region of the constituent generations
3108   // to be used to limit the extent of sweep in each generation.
3109   save_sweep_limits();
3110   verify_overflow_empty();
3111 }
3112 
3113 bool CMSCollector::markFromRoots() {
3114   // we might be tempted to assert that:
3115   // assert(!SafepointSynchronize::is_at_safepoint(),
3116   //        "inconsistent argument?");
3117   // However that wouldn't be right, because it's possible that
3118   // a safepoint is indeed in progress as a younger generation
3119   // stop-the-world GC happens even as we mark in this generation.
3120   assert(_collectorState == Marking, "inconsistent state?");
3121   check_correct_thread_executing();
3122   verify_overflow_empty();
3123 
3124   // Weak ref discovery note: We may be discovering weak
3125   // refs in this generation concurrently (but interleaved) with
3126   // weak ref discovery by a younger generation collector.
3127 
3128   CMSTokenSyncWithLocks ts(true, bitMapLock());
3129   TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
3130   CMSPhaseAccounting pa(this, "mark", _gc_tracer_cm->gc_id(), !PrintGCDetails);
3131   bool res = markFromRootsWork();
3132   if (res) {
3133     _collectorState = Precleaning;
3134   } else { // We failed and a foreground collection wants to take over
3135     assert(_foregroundGCIsActive, "internal state inconsistency");
3136     assert(_restart_addr == NULL,  "foreground will restart from scratch");
3137     if (PrintGCDetails) {
3138       gclog_or_tty->print_cr("bailing out to foreground collection");
3139     }
3140   }
3141   verify_overflow_empty();
3142   return res;
3143 }
3144 
3145 bool CMSCollector::markFromRootsWork() {
3146   // iterate over marked bits in bit map, doing a full scan and mark
3147   // from these roots using the following algorithm:
3148   // . if oop is to the right of the current scan pointer,
3149   //   mark corresponding bit (we'll process it later)
3150   // . else (oop is to left of current scan pointer)
3151   //   push oop on marking stack
3152   // . drain the marking stack
3153 
3154   // Note that when we do a marking step we need to hold the
3155   // bit map lock -- recall that direct allocation (by mutators)
3156   // and promotion (by younger generation collectors) is also
3157   // marking the bit map. [the so-called allocate live policy.]
3158   // Because the implementation of bit map marking is not
3159   // robust wrt simultaneous marking of bits in the same word,
3160   // we need to make sure that there is no such interference
3161   // between concurrent such updates.
3162 
3163   // already have locks
3164   assert_lock_strong(bitMapLock());
3165 
3166   verify_work_stacks_empty();
3167   verify_overflow_empty();
3168   bool result = false;
3169   if (CMSConcurrentMTEnabled && ConcGCThreads > 0) {
3170     result = do_marking_mt();
3171   } else {
3172     result = do_marking_st();
3173   }
3174   return result;
3175 }
3176 
3177 // Forward decl
3178 class CMSConcMarkingTask;
3179 
3180 class CMSConcMarkingTerminator: public ParallelTaskTerminator {
3181   CMSCollector*       _collector;
3182   CMSConcMarkingTask* _task;
3183  public:
3184   virtual void yield();
3185 
3186   // "n_threads" is the number of threads to be terminated.
3187   // "queue_set" is a set of work queues of other threads.
3188   // "collector" is the CMS collector associated with this task terminator.
3189   // "yield" indicates whether we need the gang as a whole to yield.
3190   CMSConcMarkingTerminator(int n_threads, TaskQueueSetSuper* queue_set, CMSCollector* collector) :
3191     ParallelTaskTerminator(n_threads, queue_set),
3192     _collector(collector) { }
3193 
3194   void set_task(CMSConcMarkingTask* task) {
3195     _task = task;
3196   }
3197 };
3198 
3199 class CMSConcMarkingTerminatorTerminator: public TerminatorTerminator {
3200   CMSConcMarkingTask* _task;
3201  public:
3202   bool should_exit_termination();
3203   void set_task(CMSConcMarkingTask* task) {
3204     _task = task;
3205   }
3206 };
3207 
3208 // MT Concurrent Marking Task
3209 class CMSConcMarkingTask: public YieldingFlexibleGangTask {
3210   CMSCollector* _collector;
3211   int           _n_workers;                  // requested/desired # workers
3212   bool          _result;
3213   CompactibleFreeListSpace*  _cms_space;
3214   char          _pad_front[64];   // padding to ...
3215   HeapWord*     _global_finger;   // ... avoid sharing cache line
3216   char          _pad_back[64];
3217   HeapWord*     _restart_addr;
3218 
3219   //  Exposed here for yielding support
3220   Mutex* const _bit_map_lock;
3221 
3222   // The per thread work queues, available here for stealing
3223   OopTaskQueueSet*  _task_queues;
3224 
3225   // Termination (and yielding) support
3226   CMSConcMarkingTerminator _term;
3227   CMSConcMarkingTerminatorTerminator _term_term;
3228 
3229  public:
3230   CMSConcMarkingTask(CMSCollector* collector,
3231                  CompactibleFreeListSpace* cms_space,
3232                  YieldingFlexibleWorkGang* workers,
3233                  OopTaskQueueSet* task_queues):
3234     YieldingFlexibleGangTask("Concurrent marking done multi-threaded"),
3235     _collector(collector),
3236     _cms_space(cms_space),
3237     _n_workers(0), _result(true),
3238     _task_queues(task_queues),
3239     _term(_n_workers, task_queues, _collector),
3240     _bit_map_lock(collector->bitMapLock())
3241   {
3242     _requested_size = _n_workers;
3243     _term.set_task(this);
3244     _term_term.set_task(this);
3245     _restart_addr = _global_finger = _cms_space->bottom();
3246   }
3247 
3248 
3249   OopTaskQueueSet* task_queues()  { return _task_queues; }
3250 
3251   OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
3252 
3253   HeapWord** global_finger_addr() { return &_global_finger; }
3254 
3255   CMSConcMarkingTerminator* terminator() { return &_term; }
3256 
3257   virtual void set_for_termination(int active_workers) {
3258     terminator()->reset_for_reuse(active_workers);
3259   }
3260 
3261   void work(uint worker_id);
3262   bool should_yield() {
3263     return    ConcurrentMarkSweepThread::should_yield()
3264            && !_collector->foregroundGCIsActive();
3265   }
3266 
3267   virtual void coordinator_yield();  // stuff done by coordinator
3268   bool result() { return _result; }
3269 
3270   void reset(HeapWord* ra) {
3271     assert(_global_finger >= _cms_space->end(),  "Postcondition of ::work(i)");
3272     _restart_addr = _global_finger = ra;
3273     _term.reset_for_reuse();
3274   }
3275 
3276   static bool get_work_from_overflow_stack(CMSMarkStack* ovflw_stk,
3277                                            OopTaskQueue* work_q);
3278 
3279  private:
3280   void do_scan_and_mark(int i, CompactibleFreeListSpace* sp);
3281   void do_work_steal(int i);
3282   void bump_global_finger(HeapWord* f);
3283 };
3284 
3285 bool CMSConcMarkingTerminatorTerminator::should_exit_termination() {
3286   assert(_task != NULL, "Error");
3287   return _task->yielding();
3288   // Note that we do not need the disjunct || _task->should_yield() above
3289   // because we want terminating threads to yield only if the task
3290   // is already in the midst of yielding, which happens only after at least one
3291   // thread has yielded.
3292 }
3293 
3294 void CMSConcMarkingTerminator::yield() {
3295   if (_task->should_yield()) {
3296     _task->yield();
3297   } else {
3298     ParallelTaskTerminator::yield();
3299   }
3300 }
3301 
3302 ////////////////////////////////////////////////////////////////
3303 // Concurrent Marking Algorithm Sketch
3304 ////////////////////////////////////////////////////////////////
3305 // Until all tasks exhausted (in the CMS space):
3306 // -- claim next available chunk
3307 // -- bump global finger via CAS
3308 // -- find first object that starts in this chunk
3309 //    and start scanning bitmap from that position
3310 // -- scan marked objects for oops
3311 // -- CAS-mark target, and if successful:
3312 //    . if target oop is above global finger (volatile read)
3313 //      nothing to do
3314 //    . if target oop is in chunk and above local finger
3315 //        then nothing to do
3316 //    . else push on work-queue
3317 // -- Deal with possible overflow issues:
3318 //    . local work-queue overflow causes stuff to be pushed on
3319 //      global (common) overflow queue
3320 //    . always first empty local work queue
3321 //    . then get a batch of oops from global work queue if any
3322 //    . then do work stealing
3323 // -- When all tasks claimed
3324 //    and local work queue empty,
3325 //    then in a loop do:
3326 //    . check global overflow stack; steal a batch of oops and trace
3327 //    . try to steal from other threads if the global overflow stack is empty
3328 //    . if neither is available, offer termination
3329 // -- Terminate and return result
3330 //
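// A compact standalone rendering of the claiming loop sketched above,
// using std::atomic in place of the Atomic:: CAS primitives; task
// bookkeeping, bitmap scanning and oop tracing are elided, and all names
// are illustrative:
//
#if 0  // Illustrative sketch only; not compiled into the collector.
#include <algorithm>
#include <atomic>
#include <cstddef>

static std::atomic<size_t> next_chunk(0);     // next task to claim
static std::atomic<size_t> global_finger(0);  // max end of claimed chunks

void concurrent_mark_worker(size_t space_start, size_t space_end,
                            size_t chunk_size) {
  for (;;) {
    size_t n = next_chunk.fetch_add(1);       // claim the nth chunk
    size_t start = space_start + n * chunk_size;
    if (start >= space_end) break;            // all tasks exhausted
    size_t end = std::min(start + chunk_size, space_end);
    // Bump the global finger (a monotonic max, via CAS) *before*
    // scanning: the marking closure's "above the global finger =>
    // nothing to do" test relies on the finger covering every chunk
    // that has already been claimed.
    size_t cur = global_finger.load();
    while (end > cur &&
           !global_finger.compare_exchange_weak(cur, end)) {
      // 'cur' was refreshed by the failed CAS; retry while still needed.
    }
    // ... scan marked objects in [start, end), pushing grey oops ...
  }
}
#endif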
3331 void CMSConcMarkingTask::work(uint worker_id) {
3332   elapsedTimer _timer;
3333   ResourceMark rm;
3334   HandleMark hm;
3335 
3336   DEBUG_ONLY(_collector->verify_overflow_empty();)
3337 
3338   // Before we begin work, our work queue should be empty
3339   assert(work_queue(worker_id)->size() == 0, "Expected to be empty");
3340   // Scan the bitmap covering _cms_space, tracing through grey objects.
3341   _timer.start();
3342   do_scan_and_mark(worker_id, _cms_space);
3343   _timer.stop();
3344   if (PrintCMSStatistics != 0) {
3345     gclog_or_tty->print_cr("Finished cms space scanning in %dth thread: %3.3f sec",
3346       worker_id, _timer.seconds());
3347       // XXX: need xxx/xxx type of notation, two timers
3348   }
3349 
3350   // ... do work stealing
3351   _timer.reset();
3352   _timer.start();
3353   do_work_steal(worker_id);
3354   _timer.stop();
3355   if (PrintCMSStatistics != 0) {
3356     gclog_or_tty->print_cr("Finished work stealing in %dth thread: %3.3f sec",
3357       worker_id, _timer.seconds());
3358       // XXX: need xxx/xxx type of notation, two timers
3359   }
3360   assert(_collector->_markStack.isEmpty(), "Should have been emptied");
3361   assert(work_queue(worker_id)->size() == 0, "Should have been emptied");
3362   // Note that under the current task protocol, the
3363   // following assertion holds even if the space has
3364   // expanded since the completion of the concurrent
3365   // marking. XXX This will likely change under strict
3366   // ABORT semantics.
3367   // After perm removal the comparison was changed to
3368   // greater than or equal to from strictly greater than.
3369   // Before perm removal the highest address sweep would
3370   // have been at the end of perm gen but now is at the
3371   // end of the tenured gen.
3372   assert(_global_finger >=  _cms_space->end(),
3373          "All tasks have been completed");
3374   DEBUG_ONLY(_collector->verify_overflow_empty();)
3375 }
3376 
3377 void CMSConcMarkingTask::bump_global_finger(HeapWord* f) {
3378   HeapWord* read = _global_finger;
3379   HeapWord* cur  = read;
3380   while (f > read) {
3381     cur = read;
3382     read = (HeapWord*) Atomic::cmpxchg_ptr(f, &_global_finger, cur);
3383     if (cur == read) {
3384       // our cas succeeded
3385       assert(_global_finger >= f, "protocol consistency");
3386       break;
3387     }
3388   }
3389 }
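// (Atomic::cmpxchg_ptr(f, &_global_finger, cur) installs f only if the
// field still holds cur, and returns the value it actually found; the
// loop above is therefore a lock-free "monotonic max": if two threads
// publish fingers 0x1000 and 0x2000 concurrently, 0x2000 always wins.)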
3390 
3391 // This is really inefficient, and should be redone by
3392 // using (not yet available) block-read and -write interfaces to the
3393 // stack and the work_queue. XXX FIX ME !!!
3394 bool CMSConcMarkingTask::get_work_from_overflow_stack(CMSMarkStack* ovflw_stk,
3395                                                       OopTaskQueue* work_q) {
3396   // Fast lock-free check
3397   if (ovflw_stk->length() == 0) {
3398     return false;
3399   }
3400   assert(work_q->size() == 0, "Shouldn't steal");
3401   MutexLockerEx ml(ovflw_stk->par_lock(),
3402                    Mutex::_no_safepoint_check_flag);
3403   // Grab up to 1/4 the size of the work queue
3404   size_t num = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
3405                     (size_t)ParGCDesiredObjsFromOverflowList);
3406   num = MIN2(num, ovflw_stk->length());
3407   for (int i = (int) num; i > 0; i--) {
3408     oop cur = ovflw_stk->pop();
3409     assert(cur != NULL, "Counted wrong?");
3410     work_q->push(cur);
3411   }
3412   return num > 0;
3413 }
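// Worked example of the batch sizing above (numbers illustrative): for an
// empty work queue with max_elems == 16384 and the default
// ParGCDesiredObjsFromOverflowList == 20, num == MIN2(16384/4, 20) == 20,
// further capped by however many oops the overflow stack actually holds.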
3414 
3415 void CMSConcMarkingTask::do_scan_and_mark(int i, CompactibleFreeListSpace* sp) {
3416   SequentialSubTasksDone* pst = sp->conc_par_seq_tasks();
3417   int n_tasks = pst->n_tasks();
3418   // We allow that there may be no tasks to do here because
3419   // we are restarting after a stack overflow.
3420   assert(pst->valid() || n_tasks == 0, "Uninitialized use?");
3421   uint nth_task = 0;
3422 
3423   HeapWord* aligned_start = sp->bottom();
3424   if (sp->used_region().contains(_restart_addr)) {
3425     // Align down to a card boundary for the start of 0th task
3426     // for this space.
3427     aligned_start =
3428       (HeapWord*)align_size_down((uintptr_t)_restart_addr,
3429                                  CardTableModRefBS::card_size);
3430   }
3431 
3432   size_t chunk_size = sp->marking_task_size();
3433   while (!pst->is_task_claimed(/* reference */ nth_task)) {
3434     // Having claimed the nth task in this space,
3435     // compute the chunk that it corresponds to:
3436     MemRegion span = MemRegion(aligned_start + nth_task*chunk_size,
3437                                aligned_start + (nth_task+1)*chunk_size);
3438     // Try and bump the global finger via a CAS;
3439     // note that we need to do the global finger bump
3440     // _before_ taking the intersection below, because
3441     // the task corresponding to that region will be
3442     // deemed done even if the used_region() expands
3443     // because of allocation -- as it almost certainly will
3444     // during start-up while the threads yield in the
3445     // closure below.
3446     HeapWord* finger = span.end();
3447     bump_global_finger(finger);   // atomically
3448     // There are null tasks here corresponding to chunks
3449     // beyond the "top" address of the space.
3450     span = span.intersection(sp->used_region());
3451     if (!span.is_empty()) {  // Non-null task
3452       HeapWord* prev_obj;
3453       assert(!span.contains(_restart_addr) || nth_task == 0,
3454              "Inconsistency");
3455       if (nth_task == 0) {
3456         // For the 0th task, we'll not need to compute a block_start.
3457         if (span.contains(_restart_addr)) {
3458           // In the case of a restart because of stack overflow,
3459           // we might additionally skip a chunk prefix.
3460           prev_obj = _restart_addr;
3461         } else {
3462           prev_obj = span.start();
3463         }
3464       } else {
3465         // We want to skip the first object because
3466         // the protocol is to scan any object in its entirety
3467         // that _starts_ in this span; a fortiori, any
3468         // object starting in an earlier span is scanned
3469         // as part of an earlier claimed task.
3470         // Below we use the "careful" version of block_start
3471         // so we do not try to navigate uninitialized objects.
3472         prev_obj = sp->block_start_careful(span.start());
3473         // Below we use a variant of block_size that uses the
3474         // Printezis bits to avoid waiting for allocated
3475         // objects to become initialized/parsable.
3476         while (prev_obj < span.start()) {
3477           size_t sz = sp->block_size_no_stall(prev_obj, _collector);
3478           if (sz > 0) {
3479             prev_obj += sz;
3480           } else {
3481             // In this case we may end up doing a bit of redundant
3482             // scanning, but that appears unavoidable, short of
3483             // locking the free list locks; see bug 6324141.
3484             break;
3485           }
3486         }
3487       }
3488       if (prev_obj < span.end()) {
3489         MemRegion my_span = MemRegion(prev_obj, span.end());
3490         // Do the marking work within a non-empty span --
3491         // the last argument to the constructor indicates whether the
3492         // iteration should be incremental with periodic yields.
3493         Par_MarkFromRootsClosure cl(this, _collector, my_span,
3494                                     &_collector->_markBitMap,
3495                                     work_queue(i),
3496                                     &_collector->_markStack);
3497         _collector->_markBitMap.iterate(&cl, my_span.start(), my_span.end());
3498       } // else nothing to do for this task
3499     }   // else nothing to do for this task
3500   }
3501   // We'd be tempted to assert here that since there are no
3502   // more tasks left to claim in this space, the global_finger
3503   // must exceed space->top() and a fortiori space->end(). However,
3504   // that would not quite be correct because the bumping of
3505   // global_finger occurs strictly after the claiming of a task,
3506   // so by the time we reach here the global finger may not yet
3507   // have been bumped up by the thread that claimed the last
3508   // task.
3509   pst->all_tasks_completed();
3510 }
3511 
3512 class Par_ConcMarkingClosure: public MetadataAwareOopClosure {
3513  private:
3514   CMSCollector* _collector;
3515   CMSConcMarkingTask* _task;
3516   MemRegion     _span;
3517   CMSBitMap*    _bit_map;
3518   CMSMarkStack* _overflow_stack;
3519   OopTaskQueue* _work_queue;
3520  protected:
3521   DO_OOP_WORK_DEFN
3522  public:
3523   Par_ConcMarkingClosure(CMSCollector* collector, CMSConcMarkingTask* task, OopTaskQueue* work_queue,
3524                          CMSBitMap* bit_map, CMSMarkStack* overflow_stack):
3525     MetadataAwareOopClosure(collector->ref_processor()),
3526     _collector(collector),
3527     _task(task),
3528     _span(collector->_span),
3529     _work_queue(work_queue),
3530     _bit_map(bit_map),
3531     _overflow_stack(overflow_stack)
3532   { }
3533   virtual void do_oop(oop* p);
3534   virtual void do_oop(narrowOop* p);
3535 
3536   void trim_queue(size_t max);
3537   void handle_stack_overflow(HeapWord* lost);
3538   void do_yield_check() {
3539     if (_task->should_yield()) {
3540       _task->yield();
3541     }
3542   }
3543 };
3544 
3545 // Grey object scanning during work stealing phase --
3546 // the salient assumption here is that any references
3547 // that are in these stolen objects being scanned must
3548 // already have been initialized (else they would not have
3549 // been published), so we do not need to check for
3550 // uninitialized objects before pushing here.
3551 void Par_ConcMarkingClosure::do_oop(oop obj) {
3552   assert(obj->is_oop_or_null(true), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(obj)));
3553   HeapWord* addr = (HeapWord*)obj;
3554   // Check if oop points into the CMS generation
3555   // and is not marked
3556   if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
3557     // a white object ...
3558     // If we manage to "claim" the object, by being the
3559     // first thread to mark it, then we push it on our
3560     // marking stack
3561     if (_bit_map->par_mark(addr)) {     // ... now grey
3562       // push on work queue (grey set)
3563       bool simulate_overflow = false;
3564       NOT_PRODUCT(
3565         if (CMSMarkStackOverflowALot &&
3566             _collector->simulate_overflow()) {
3567           // simulate a stack overflow
3568           simulate_overflow = true;
3569         }
3570       )
3571       if (simulate_overflow ||
3572           !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
3573         // stack overflow
3574         if (PrintCMSStatistics != 0) {
3575           gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
3576                                  SIZE_FORMAT, _overflow_stack->capacity());
3577         }
3578         // We cannot assert that the overflow stack is full because
3579         // it may have been emptied since.
3580         assert(simulate_overflow ||
3581                _work_queue->size() == _work_queue->max_elems(),
3582               "Else push should have succeeded");
3583         handle_stack_overflow(addr);
3584       }
3585     } // Else, some other thread got there first
3586     do_yield_check();
3587   }
3588 }
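// The marking step above is the classic tri-color protocol: white (bit
// clear, unvisited) -> grey (bit set, queued for scanning) -> black (bit
// set, scanned). A standalone sketch of the claim-and-push step, with a
// std::atomic<bool> mark bit in place of CMSBitMap; names and types are
// illustrative, and overflow handling is elided:
//
#if 0  // Illustrative sketch only; not compiled into the collector.
#include <atomic>
#include <deque>

struct Object {
  std::atomic<bool> mark_bit;   // false == white; true == grey/black
};

// Grey set for one worker (the real code uses lock-free work queues).
static std::deque<Object*> work_queue;

void visit(Object* obj) {
  bool expected = false;
  // White -> grey: exactly one of any racing threads wins this CAS and
  // becomes responsible for queuing the object for scanning.
  if (obj->mark_bit.compare_exchange_strong(expected, true)) {
    work_queue.push_back(obj);  // now grey; scan (blacken) it later
  }
  // else: some other thread claimed it first; nothing to do.
}
#endif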
3589 
3590 void Par_ConcMarkingClosure::do_oop(oop* p)       { Par_ConcMarkingClosure::do_oop_work(p); }
3591 void Par_ConcMarkingClosure::do_oop(narrowOop* p) { Par_ConcMarkingClosure::do_oop_work(p); }
3592 
3593 void Par_ConcMarkingClosure::trim_queue(size_t max) {
3594   while (_work_queue->size() > max) {
3595     oop new_oop;
3596     if (_work_queue->pop_local(new_oop)) {
3597       assert(new_oop->is_oop(), "Should be an oop");
3598       assert(_bit_map->isMarked((HeapWord*)new_oop), "Grey object");
3599       assert(_span.contains((HeapWord*)new_oop), "Not in span");
3600       new_oop->oop_iterate(this);  // do_oop() above
3601       do_yield_check();
3602     }
3603   }
3604 }
3605 
3606 // Upon stack overflow, we discard (part of) the stack,
3607 // remembering the least address amongst those discarded
3608 // in CMSCollector's _restart_addr.
3609 void Par_ConcMarkingClosure::handle_stack_overflow(HeapWord* lost) {
3610   // We need to do this under a mutex to prevent other
3611   // workers from interfering with the work done below.
3612   MutexLockerEx ml(_overflow_stack->par_lock(),
3613                    Mutex::_no_safepoint_check_flag);
3614   // Remember the least grey address discarded
3615   HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost);
3616   _collector->lower_restart_addr(ra);
3617   _overflow_stack->reset();  // discard stack contents
3618   _overflow_stack->expand(); // expand the stack if possible
3619 }
3620 
3621 
3622 void CMSConcMarkingTask::do_work_steal(int i) {
3623   OopTaskQueue* work_q = work_queue(i);
3624   oop obj_to_scan;
3625   CMSBitMap* bm = &(_collector->_markBitMap);
3626   CMSMarkStack* ovflw = &(_collector->_markStack);
3627   int* seed = _collector->hash_seed(i);
3628   Par_ConcMarkingClosure cl(_collector, this, work_q, bm, ovflw);
3629   while (true) {
3630     cl.trim_queue(0);
3631     assert(work_q->size() == 0, "Should have been emptied above");
3632     if (get_work_from_overflow_stack(ovflw, work_q)) {
3633       // Can't assert below because the work obtained from the
3634       // overflow stack may already have been stolen from us.
3635       // assert(work_q->size() > 0, "Work from overflow stack");
3636       continue;
3637     } else if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
3638       assert(obj_to_scan->is_oop(), "Should be an oop");
3639       assert(bm->isMarked((HeapWord*)obj_to_scan), "Grey object");
3640       obj_to_scan->oop_iterate(&cl);
3641     } else if (terminator()->offer_termination(&_term_term)) {
3642       assert(work_q->size() == 0, "Impossible!");
3643       break;
3644     } else if (yielding() || should_yield()) {
3645       yield();
3646     }
3647   }
3648 }
3649 
3650 // This is run by the CMS (coordinator) thread.
3651 void CMSConcMarkingTask::coordinator_yield() {
3652   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
3653          "CMS thread should hold CMS token");
3654   // First give up the locks, then yield, then re-lock
3655   // We should probably use a constructor/destructor idiom to
3656   // do this unlock/lock or modify the MutexUnlocker class to
3657   // serve our purpose. XXX
3658   assert_lock_strong(_bit_map_lock);
3659   _bit_map_lock->unlock();
3660   ConcurrentMarkSweepThread::desynchronize(true);
3661   _collector->stopTimer();
3662   if (PrintCMSStatistics != 0) {
3663     _collector->incrementYields();
3664   }
3665 
3666   // It is possible for whichever thread initiated the yield request
3667   // not to get a chance to wake up and take the bitmap lock between
3668   // this thread releasing it and reacquiring it. So, while the
3669   // should_yield() flag is on, let's sleep for a bit to give the
3670   // other thread a chance to wake up. The limit imposed on the number
3671 // of iterations is defensive, to avoid any unforeseen circumstances
3672   // putting us into an infinite loop. Since it's always been this
3673   // (coordinator_yield()) method that was observed to cause the
3674   // problem, we are using a parameter (CMSCoordinatorYieldSleepCount)
3675   // which is by default non-zero. For the other seven methods that
3676 // also perform the yield operation, we are using a different
3677   // parameter (CMSYieldSleepCount) which is by default zero. This way we
3678   // can enable the sleeping for those methods too, if necessary.
3679   // See 6442774.
3680   //
3681   // We really need to reconsider the synchronization between the GC
3682   // thread and the yield-requesting threads in the future and we
3683   // should really use wait/notify, which is the recommended
3684   // way of doing this type of interaction. Additionally, we should
3685 // consolidate the eight methods that do the yield operation, which
3686 // are almost identical, into one for better maintainability and
3687   // readability. See 6445193.
3688   //
3689   // Tony 2006.06.29
3690   for (unsigned i = 0; i < CMSCoordinatorYieldSleepCount &&
3691                    ConcurrentMarkSweepThread::should_yield() &&
3692                    !CMSCollector::foregroundGCIsActive(); ++i) {
3693     os::sleep(Thread::current(), 1, false);
3694   }
3695 
3696   ConcurrentMarkSweepThread::synchronize(true);
3697   _bit_map_lock->lock_without_safepoint_check();
3698   _collector->startTimer();
3699 }
3700 
3701 bool CMSCollector::do_marking_mt() {
3702   assert(ConcGCThreads > 0 && conc_workers() != NULL, "precondition");
3703   int num_workers = AdaptiveSizePolicy::calc_active_conc_workers(
3704                                        conc_workers()->total_workers(),
3705                                        conc_workers()->active_workers(),
3706                                        Threads::number_of_non_daemon_threads());
3707   conc_workers()->set_active_workers(num_workers);
3708 
3709   CompactibleFreeListSpace* cms_space  = _cmsGen->cmsSpace();
3710 
3711   CMSConcMarkingTask tsk(this,
3712                          cms_space,
3713                          conc_workers(),
3714                          task_queues());
3715 
3716   // Since the actual number of workers we get may be different
3717   // from the number we requested above, do we need to do anything different
3718   // below? In particular, maybe we need to subclass the SequentialSubTasksDone
3719   // class?? XXX
3720   cms_space->initialize_sequential_subtasks_for_marking(num_workers);
3721 
3722   // Refs discovery is already non-atomic.
3723   assert(!ref_processor()->discovery_is_atomic(), "Should be non-atomic");
3724   assert(ref_processor()->discovery_is_mt(), "Discovery should be MT");
3725   conc_workers()->start_task(&tsk);
3726   while (tsk.yielded()) {
3727     tsk.coordinator_yield();
3728     conc_workers()->continue_task(&tsk);
3729   }
3730   // If the task was aborted, _restart_addr will be non-NULL
3731   assert(tsk.completed() || _restart_addr != NULL, "Inconsistency");
3732   while (_restart_addr != NULL) {
3733     // XXX For now we do not make use of ABORTED state and have not
3734     // yet implemented the right abort semantics (even in the original
3735     // single-threaded CMS case). That needs some more investigation
3736     // and is deferred for now; see CR# TBF. 07252005YSR. XXX
3737     assert(!CMSAbortSemantics || tsk.aborted(), "Inconsistency");
3738     // If _restart_addr is non-NULL, a marking stack overflow
3739     // occurred; we need to do a fresh marking iteration from the
3740     // indicated restart address.
3741     if (_foregroundGCIsActive) {
3742       // We may be running into repeated stack overflows, having
3743       // reached the limit of the stack size, while making very
3744       // slow forward progress. It may be best to bail out and
3745       // let the foreground collector do its job.
3746       // Clear _restart_addr, so that foreground GC
3747       // works from scratch. This avoids the headache of
3748       // a "rescan" which would otherwise be needed because
3749       // of the dirty mod union table & card table.
3750       _restart_addr = NULL;
3751       return false;
3752     }
3753     // Adjust the task to restart from _restart_addr
3754     tsk.reset(_restart_addr);
3755     cms_space->initialize_sequential_subtasks_for_marking(num_workers,
3756                                                            _restart_addr);
3757     _restart_addr = NULL;
3758     // Get the workers going again
3759     conc_workers()->start_task(&tsk);
3760     while (tsk.yielded()) {
3761       tsk.coordinator_yield();
3762       conc_workers()->continue_task(&tsk);
3763     }
3764   }
3765   assert(tsk.completed(), "Inconsistency");
3766   assert(tsk.result() == true, "Inconsistency");
3767   return true;
3768 }
3769 
3770 bool CMSCollector::do_marking_st() {
3771   ResourceMark rm;
3772   HandleMark   hm;
3773 
3774   // Temporarily make refs discovery single threaded (non-MT)
3775   ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false);
3776   MarkFromRootsClosure markFromRootsClosure(this, _span, &_markBitMap,
3777     &_markStack, CMSYield);
3778   // The last argument to the closure constructor above indicates whether
3779   // the marking iteration should be incremental with periodic yields.
3780   _markBitMap.iterate(&markFromRootsClosure);
3781   // If _restart_addr is non-NULL, a marking stack overflow
3782   // occurred; we need to do a fresh iteration from the
3783   // indicated restart address.
3784   while (_restart_addr != NULL) {
3785     if (_foregroundGCIsActive) {
3786       // We may be running into repeated stack overflows, having
3787       // reached the limit of the stack size, while making very
3788       // slow forward progress. It may be best to bail out and
3789       // let the foreground collector do its job.
3790       // Clear _restart_addr, so that foreground GC
3791       // works from scratch. This avoids the headache of
3792       // a "rescan" which would otherwise be needed because
3793       // of the dirty mod union table & card table.
3794       _restart_addr = NULL;
3795       return false;  // indicating failure to complete marking
3796     }
3797     // Deal with stack overflow:
3798     // we restart marking from _restart_addr
3799     HeapWord* ra = _restart_addr;
3800     markFromRootsClosure.reset(ra);
3801     _restart_addr = NULL;
3802     _markBitMap.iterate(&markFromRootsClosure, ra, _span.end());
3803   }
3804   return true;
3805 }
3806 
3807 void CMSCollector::preclean() {
3808   check_correct_thread_executing();
3809   assert(Thread::current()->is_ConcurrentGC_thread(), "Wrong thread");
3810   verify_work_stacks_empty();
3811   verify_overflow_empty();
3812   _abort_preclean = false;
3813   if (CMSPrecleaningEnabled) {
3814     if (!CMSEdenChunksRecordAlways) {
3815       _eden_chunk_index = 0;
3816     }
3817     size_t used = get_eden_used();
3818     size_t capacity = get_eden_capacity();
3819     // Don't start sampling unless we will get sufficiently
3820     // many samples.
3821     if (used < (capacity/(CMSScheduleRemarkSamplingRatio * 100)
3822                 * CMSScheduleRemarkEdenPenetration)) {
3823       _start_sampling = true;
3824     } else {
3825       _start_sampling = false;
3826     }
3827     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
3828     CMSPhaseAccounting pa(this, "preclean", _gc_tracer_cm->gc_id(), !PrintGCDetails);
3829     preclean_work(CMSPrecleanRefLists1, CMSPrecleanSurvivors1);
3830   }
3831   CMSTokenSync x(true); // is cms thread
3832   if (CMSPrecleaningEnabled) {
3833     sample_eden();
3834     _collectorState = AbortablePreclean;
3835   } else {
3836     _collectorState = FinalMarking;
3837   }
3838   verify_work_stacks_empty();
3839   verify_overflow_empty();
3840 }
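// Worked example of the sampling threshold above, with the defaults
// CMSScheduleRemarkSamplingRatio == 5 and
// CMSScheduleRemarkEdenPenetration == 50: sampling starts only while
// used < capacity/(5*100) * 50 == capacity/10, i.e. while Eden is below
// one fifth of the 50% occupancy at which we aim to schedule the remark.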
3841 
3842 // Try and schedule the remark such that young gen
3843 // occupancy is CMSScheduleRemarkEdenPenetration %.
3844 void CMSCollector::abortable_preclean() {
3845   check_correct_thread_executing();
3846   assert(CMSPrecleaningEnabled,  "Inconsistent control state");
3847   assert(_collectorState == AbortablePreclean, "Inconsistent control state");
3848 
3849   // If Eden's current occupancy is below this threshold,
3850   // immediately schedule the remark; else preclean
3851   // past the next scavenge in an effort to
3852   // schedule the pause as described above. By choosing
3853   // CMSScheduleRemarkEdenSizeThreshold >= max eden size
3854   // we will never do an actual abortable preclean cycle.
3855   if (get_eden_used() > CMSScheduleRemarkEdenSizeThreshold) {
3856     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
3857     CMSPhaseAccounting pa(this, "abortable-preclean", _gc_tracer_cm->gc_id(), !PrintGCDetails);
3858     // We need more smarts in the abortable preclean
3859     // loop below to deal with cases where allocation
3860     // in young gen is very very slow, and our precleaning
3861     // is running a losing race against a horde of
3862     // mutators intent on flooding us with CMS updates
3863     // (dirty cards).
3864     // One, admittedly dumb, strategy is to give up
3865     // after a certain number of abortable precleaning loops
3866     // or after a certain maximum time. We want to make
3867     // this smarter in the next iteration.
3868     // XXX FIX ME!!! YSR
3869     size_t loops = 0, workdone = 0, cumworkdone = 0, waited = 0;
3870     while (!(should_abort_preclean() ||
3871              ConcurrentMarkSweepThread::should_terminate())) {
3872       workdone = preclean_work(CMSPrecleanRefLists2, CMSPrecleanSurvivors2);
3873       cumworkdone += workdone;
3874       loops++;
3875       // Voluntarily terminate abortable preclean phase if we have
3876       // been at it for too long.
3877       if ((CMSMaxAbortablePrecleanLoops != 0) &&
3878           loops >= CMSMaxAbortablePrecleanLoops) {
3879         if (PrintGCDetails) {
3880           gclog_or_tty->print(" CMS: abort preclean due to loops ");
3881         }
3882         break;
3883       }
3884       if (pa.wallclock_millis() > CMSMaxAbortablePrecleanTime) {
3885         if (PrintGCDetails) {
3886           gclog_or_tty->print(" CMS: abort preclean due to time ");
3887         }
3888         break;
3889       }
3890       // If we are doing little work each iteration, we should
3891       // take a short break.
3892       if (workdone < CMSAbortablePrecleanMinWorkPerIteration) {
3893         // Sleep for some time, waiting for work to accumulate
3894         stopTimer();
3895         cmsThread()->wait_on_cms_lock(CMSAbortablePrecleanWaitMillis);
3896         startTimer();
3897         waited++;
3898       }
3899     }
3900     if (PrintCMSStatistics > 0) {
3901       gclog_or_tty->print(" [" SIZE_FORMAT " iterations, " SIZE_FORMAT " waits, " SIZE_FORMAT " cards] ",
3902                           loops, waited, cumworkdone);
3903     }
3904   }
3905   CMSTokenSync x(true); // is cms thread
3906   if (_collectorState != Idling) {
3907     assert(_collectorState == AbortablePreclean,
3908            "Spontaneous state transition?");
3909     _collectorState = FinalMarking;
3910   } // Else, a foreground collection completed this CMS cycle.
3911   return;
3912 }
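
     // Editor's illustration: the three termination conditions of the
     // abortable preclean loop above (loop cap, wallclock cap, and a nap
     // when too little work is available), separated from the GC
     // plumbing. Hypothetical names; guarded out of the build.
     #ifdef CMS_ILLUSTRATION_ONLY
     #include <cstddef>
     typedef size_t (*preclean_fn)();      // one preclean pass; returns cards done
     typedef bool   (*abort_fn)();         // external request to stop
     typedef double (*clock_fn)();         // elapsed wallclock millis
     typedef void   (*sleep_fn)(long ms);  // let dirty cards accumulate
     static void abortable_preclean_loop(preclean_fn do_pass, abort_fn aborted,
                                         clock_fn elapsed_ms, sleep_fn nap,
                                         size_t max_loops, double max_millis,
                                         size_t min_work, long wait_millis) {
       size_t loops = 0;
       while (!aborted()) {
         size_t workdone = do_pass();
         loops++;
         if (max_loops != 0 && loops >= max_loops) break;  // too many iterations
         if (elapsed_ms() > max_millis)            break;  // been at it too long
         if (workdone < min_work) nap(wait_millis);        // wait for more work
       }
     }
     #endif // CMS_ILLUSTRATION_ONLY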
3913 
3914 // Respond to an Eden sampling opportunity
3915 void CMSCollector::sample_eden() {
3916   // Make sure a young gc cannot sneak in between our
3917   // reading and recording of a sample.
3918   assert(Thread::current()->is_ConcurrentGC_thread(),
3919          "Only the cms thread may collect Eden samples");
3920   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
3921          "Should collect samples while holding CMS token");
3922   if (!_start_sampling) {
3923     return;
3924   }
3925   // When CMSEdenChunksRecordAlways is true, the eden chunk array
3926   // is populated by the young generation.
3927   if (_eden_chunk_array != NULL && !CMSEdenChunksRecordAlways) {
3928     if (_eden_chunk_index < _eden_chunk_capacity) {
3929       _eden_chunk_array[_eden_chunk_index] = *_top_addr;   // take sample
3930       assert(_eden_chunk_array[_eden_chunk_index] <= *_end_addr,
3931              "Unexpected state of Eden");
3932       // We'd like to check that what we just sampled is an oop-start address;
3933       // however, we cannot do that here since the object may not yet have been
3934       // initialized. So we'll instead do the check when we _use_ this sample
3935       // later.
3936       if (_eden_chunk_index == 0 ||
3937           (pointer_delta(_eden_chunk_array[_eden_chunk_index],
3938                          _eden_chunk_array[_eden_chunk_index-1])
3939            >= CMSSamplingGrain)) {
3940         _eden_chunk_index++;  // commit sample
3941       }
3942     }
3943   }
3944   if ((_collectorState == AbortablePreclean) && !_abort_preclean) {
3945     size_t used = get_eden_used();
3946     size_t capacity = get_eden_capacity();
3947     assert(used <= capacity, "Unexpected state of Eden");
3948     if (used >  (capacity/100 * CMSScheduleRemarkEdenPenetration)) {
3949       _abort_preclean = true;
3950     }
3951   }
3952 }
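
     // Editor's illustration: the two policies above in isolation. A new
     // eden sample is only committed if it is at least one sampling grain
     // past the previous one, and abortable precleaning gives up once eden
     // occupancy passes the penetration target, so the remark pause lands
     // near the requested occupancy. Hypothetical names, byte units
     // assumed; guarded out of the build.
     #ifdef CMS_ILLUSTRATION_ONLY
     #include <cstddef>
     static bool commit_sample(char* const* chunks, size_t index,
                               size_t grain_bytes) {
       return index == 0 ||
              (size_t)(chunks[index] - chunks[index - 1]) >= grain_bytes;
     }
     static bool past_penetration(size_t used, size_t capacity,
                                  size_t penetration_percent) {
       return used > capacity / 100 * penetration_percent;
     }
     #endif // CMS_ILLUSTRATION_ONLY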
3953 
3954 
3955 size_t CMSCollector::preclean_work(bool clean_refs, bool clean_survivor) {
3956   assert(_collectorState == Precleaning ||
3957          _collectorState == AbortablePreclean, "incorrect state");
3958   ResourceMark rm;
3959   HandleMark   hm;
3960 
3961   // Precleaning is currently not MT but the reference processor
3962   // may be set for MT.  Disable it temporarily here.
3963   ReferenceProcessor* rp = ref_processor();
3964   ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(rp, false);
3965 
3966   // Do one pass of scrubbing the discovered reference lists
3967   // to remove any reference objects with strongly-reachable
3968   // referents.
3969   if (clean_refs) {
3970     CMSPrecleanRefsYieldClosure yield_cl(this);
3971     assert(rp->span().equals(_span), "Spans should be equal");
3972     CMSKeepAliveClosure keep_alive(this, _span, &_markBitMap,
3973                                    &_markStack, true /* preclean */);
3974     CMSDrainMarkingStackClosure complete_trace(this,
3975                                    _span, &_markBitMap, &_markStack,
3976                                    &keep_alive, true /* preclean */);
3977 
3978     // We don't want this step to interfere with a young
3979     // collection because we don't want to take CPU
3980     // or memory bandwidth away from the young GC threads
3981     // (which may be as many as there are CPUs).
3982     // Note that we don't need to protect ourselves from
3983     // interference with mutators because they can't
3984     // manipulate the discovered reference lists nor affect
3985     // the computed reachability of the referents, the
3986     // only properties manipulated by the precleaning
3987     // of these reference lists.
3988     stopTimer();
3989     CMSTokenSyncWithLocks x(true /* is cms thread */,
3990                             bitMapLock());
3991     startTimer();
3992     sample_eden();
3993 
3994     // The following will yield to allow foreground
3995     // collection to proceed promptly. XXX YSR:
3996     // The code in this method may need further
3997     // tweaking for better performance and some restructuring
3998     // for cleaner interfaces.
3999     GCTimer *gc_timer = NULL; // Currently not tracing concurrent phases
4000     rp->preclean_discovered_references(
4001           rp->is_alive_non_header(), &keep_alive, &complete_trace, &yield_cl,
4002           gc_timer, _gc_tracer_cm->gc_id());
4003   }
4004 
4005   if (clean_survivor) {  // preclean the active survivor space(s)
4006     PushAndMarkClosure pam_cl(this, _span, ref_processor(),
4007                              &_markBitMap, &_modUnionTable,
4008                              &_markStack, true /* precleaning phase */);
4009     stopTimer();
4010     CMSTokenSyncWithLocks ts(true /* is cms thread */,
4011                              bitMapLock());
4012     startTimer();
4013     unsigned int before_count =
4014       GenCollectedHeap::heap()->total_collections();
4015     SurvivorSpacePrecleanClosure
4016       sss_cl(this, _span, &_markBitMap, &_markStack,
4017              &pam_cl, before_count, CMSYield);
4018     _young_gen->from()->object_iterate_careful(&sss_cl);
4019     _young_gen->to()->object_iterate_careful(&sss_cl);
4020   }
4021   MarkRefsIntoAndScanClosure
4022     mrias_cl(_span, ref_processor(), &_markBitMap, &_modUnionTable,
4023              &_markStack, this, CMSYield,
4024              true /* precleaning phase */);
4025   // CAUTION: The following closure has persistent state that may need to
4026   // be reset upon a decrease in the sequence of addresses it
4027   // processes.
4028   ScanMarkedObjectsAgainCarefullyClosure
4029     smoac_cl(this, _span,
4030       &_markBitMap, &_markStack, &mrias_cl, CMSYield);
4031 
4032   // Preclean dirty cards in ModUnionTable and CardTable using
4033   // appropriate convergence criterion;
4034   // repeat CMSPrecleanIter times unless we find that
4035   // we are losing.
4036   assert(CMSPrecleanIter < 10, "CMSPrecleanIter is too large");
4037   assert(CMSPrecleanNumerator < CMSPrecleanDenominator,
4038          "Bad convergence multiplier");
4039   assert(CMSPrecleanThreshold >= 100,
4040          "Unreasonably low CMSPrecleanThreshold");
4041 
4042   size_t numIter, cumNumCards, lastNumCards, curNumCards;
4043   for (numIter = 0, cumNumCards = lastNumCards = curNumCards = 0;
4044        numIter < CMSPrecleanIter;
4045        numIter++, lastNumCards = curNumCards, cumNumCards += curNumCards) {
4046     curNumCards  = preclean_mod_union_table(_cmsGen, &smoac_cl);
4047     if (Verbose && PrintGCDetails) {
4048       gclog_or_tty->print(" (modUnionTable: " SIZE_FORMAT " cards)", curNumCards);
4049     }
4050     // Either there are very few dirty cards, so re-mark
4051     // pause will be small anyway, or our pre-cleaning isn't
4052     // that much faster than the rate at which cards are being
4053     // dirtied, so we might as well stop and re-mark since
4054     // precleaning won't improve our re-mark time by much.
4055     if (curNumCards <= CMSPrecleanThreshold ||
4056         (numIter > 0 &&
4057          (curNumCards * CMSPrecleanDenominator >
4058          lastNumCards * CMSPrecleanNumerator))) {
4059       numIter++;
4060       cumNumCards += curNumCards;
4061       break;
4062     }
4063   }
4064 
4065   preclean_klasses(&mrias_cl, _cmsGen->freelistLock());
4066 
4067   curNumCards = preclean_card_table(_cmsGen, &smoac_cl);
4068   cumNumCards += curNumCards;
4069   if (PrintGCDetails && PrintCMSStatistics != 0) {
4070     gclog_or_tty->print_cr(" (cardTable: " SIZE_FORMAT " cards, re-scanned " SIZE_FORMAT " cards, " SIZE_FORMAT " iterations)",
4071                   curNumCards, cumNumCards, numIter);
4072   }
4073   return cumNumCards;   // as a measure of useful work done
4074 }
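
     // Editor's illustration: the convergence test in the loop above,
     // isolated. Precleaning repeats only while each pass finds
     // sufficiently fewer dirty cards than the previous one; the ratio is
     // cross-multiplied so the test stays in exact integer arithmetic.
     // For example, with numerator 2 and denominator 3, a pass must find
     // no more than 2/3 of the prior pass's cards to be worth repeating.
     // Hypothetical names; guarded out of the build.
     #ifdef CMS_ILLUSTRATION_ONLY
     #include <cstddef>
     static bool preclean_should_stop(size_t cur_cards, size_t last_cards,
                                      size_t iteration, size_t threshold,
                                      size_t numerator, size_t denominator) {
       return cur_cards <= threshold ||                      // few cards left, or
              (iteration > 0 &&                              // this pass wasn't
               cur_cards * denominator > last_cards * numerator);  // much of a win
     }
     #endif // CMS_ILLUSTRATION_ONLY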
4075 
4076 // PRECLEANING NOTES:
4077 // Precleaning involves:
4078 // . reading the bits of the modUnionTable and clearing the set bits.
4079 // . For the cards corresponding to the set bits, we scan the
4080 //   objects on those cards. This means we need the free_list_lock
4081 //   so that we can safely iterate over the CMS space when scanning
4082 //   for oops.
4083 // . When we scan the objects, we'll be both reading and setting
4084 //   marks in the marking bit map, so we'll need the marking bit map.
4085 // . For protecting _collector_state transitions, we take the CGC_lock.
4086 //   Note that any races in the reading of card table entries by the
4087 //   CMS thread on the one hand, and the clearing of those entries by the
4088 //   VM thread or the setting of those entries by the mutator threads on the
4089 //   other, are quite benign. However, for efficiency it makes sense to keep
4090 //   the VM thread from racing with the CMS thread while the latter is
4091 //   recording dirty card info to the modUnionTable. We therefore also use the
4092 //   CGC_lock to protect the reading of the card table and the mod union
4093 //   table by the CMS thread.
4094 // . We run concurrently with mutator updates, so scanning
4095 //   needs to be done carefully  -- we should not try to scan
4096 //   potentially uninitialized objects.
4097 //
4098 // Locking strategy: While holding the CGC_lock, we scan over and
4099 // reset a maximal dirty range of the mod union / card tables, then lock
4100 // the free_list_lock and bitmap lock to do a full marking, then
4101 // release these locks; and repeat the cycle. This allows for a
4102 // certain amount of fairness in the sharing of these locks between
4103 // the CMS collector on the one hand, and the VM thread and the
4104 // mutators on the other.
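
     // Editor's illustration: the locking cycle just described, reduced to
     // its skeleton (the real loop also runs timers and eden sampling).
     // The lock type and the two hook functions are hypothetical
     // stand-ins; guarded out of the build.
     #ifdef CMS_ILLUSTRATION_ONLY
     struct SketchLock { void lock(); void unlock(); };
     static bool grab_and_reset_dirty_range();  // false when nothing is left
     static void mark_through_dirty_range();
     static void preclean_lock_cycle(SketchLock* cgc, SketchLock* freelist,
                                     SketchLock* bitmap) {
       for (;;) {
         cgc->lock();                 // 1. under CGC_lock: scan & reset a
         bool more = grab_and_reset_dirty_range();  //  maximal dirty range
         cgc->unlock();
         if (!more) break;
         freelist->lock();            // 2. under freelist + bitmap locks:
         bitmap->lock();              //    do a full marking of the range
         mark_through_dirty_range();
         bitmap->unlock();            // 3. release, giving the VM thread and
         freelist->unlock();          //    mutators a fair turn; then repeat
       }
     }
     #endif // CMS_ILLUSTRATION_ONLY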
4105 
4106 // NOTE: preclean_mod_union_table() and preclean_card_table()
4107 // further below are largely identical; if you need to modify
4108 // one of these methods, please check the other method too.
4109 
4110 size_t CMSCollector::preclean_mod_union_table(
4111   ConcurrentMarkSweepGeneration* gen,
4112   ScanMarkedObjectsAgainCarefullyClosure* cl) {
4113   verify_work_stacks_empty();
4114   verify_overflow_empty();
4115 
4116   // strategy: starting with the first card, accumulate contiguous
4117   // ranges of dirty cards; clear these cards, then scan the region
4118   // covered by these cards.
4119 
4120   // Since all of the MUT is committed up front, we can just use
4121   // that, in case the generation expands while we are precleaning.
4122   // It might also be fine to just use the committed part of the
4123   // generation, but we might potentially miss cards when the
4124   // generation is rapidly expanding while we are in the midst
4125   // of precleaning.
4126   HeapWord* startAddr = gen->reserved().start();
4127   HeapWord* endAddr   = gen->reserved().end();
4128 
4129   cl->setFreelistLock(gen->freelistLock());   // needed for yielding
4130 
4131   size_t numDirtyCards, cumNumDirtyCards;
4132   HeapWord *nextAddr, *lastAddr;
4133   for (cumNumDirtyCards = numDirtyCards = 0,
4134        nextAddr = lastAddr = startAddr;
4135        nextAddr < endAddr;
4136        nextAddr = lastAddr, cumNumDirtyCards += numDirtyCards) {
4137 
4138     ResourceMark rm;
4139     HandleMark   hm;
4140 
4141     MemRegion dirtyRegion;
4142     {
4143       stopTimer();
4144       // Potential yield point
4145       CMSTokenSync ts(true);
4146       startTimer();
4147       sample_eden();
4148       // Get dirty region starting at nextAddr (inclusive),
4149       // simultaneously clearing it.
4150       dirtyRegion =
4151         _modUnionTable.getAndClearMarkedRegion(nextAddr, endAddr);
4152       assert(dirtyRegion.start() >= nextAddr,
4153              "returned region inconsistent?");
4154     }
4155     // Remember where the next search should begin.
4156     // The returned region (if non-empty) is a right open interval,
4157     // so lastAddr is obtained from the right end of that
4158     // interval.
4159     lastAddr = dirtyRegion.end();
4160     // Should do something more transparent and less hacky XXX
4161     numDirtyCards =
4162       _modUnionTable.heapWordDiffToOffsetDiff(dirtyRegion.word_size());
4163 
4164     // We'll scan the cards in the dirty region (with periodic
4165     // yields for foreground GC as needed).
4166     if (!dirtyRegion.is_empty()) {
4167       assert(numDirtyCards > 0, "consistency check");
4168       HeapWord* stop_point = NULL;
4169       stopTimer();
4170       // Potential yield point
4171       CMSTokenSyncWithLocks ts(true, gen->freelistLock(),
4172                                bitMapLock());
4173       startTimer();
4174       {
4175         verify_work_stacks_empty();
4176         verify_overflow_empty();
4177         sample_eden();
4178         stop_point =
4179           gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
4180       }
4181       if (stop_point != NULL) {
4182         // The careful iteration stopped early either because it found an
4183         // uninitialized object, or because we were in the midst of an
4184         // "abortable preclean", which should now be aborted. Redirty
4185         // the bits corresponding to the partially-scanned or unscanned
4186         // cards. We'll either restart at the next block boundary or
4187         // abort the preclean.
4188         assert((_collectorState == AbortablePreclean && should_abort_preclean()),
4189                "Should only be AbortablePreclean.");
4190         _modUnionTable.mark_range(MemRegion(stop_point, dirtyRegion.end()));
4191         if (should_abort_preclean()) {
4192           break; // out of preclean loop
4193         } else {
4194           // Compute the next address at which preclean should pick up;
4195           // might need bitMapLock in order to read P-bits.
4196           lastAddr = next_card_start_after_block(stop_point);
4197         }
4198       }
4199     } else {
4200       assert(lastAddr == endAddr, "consistency check");
4201       assert(numDirtyCards == 0, "consistency check");
4202       break;
4203     }
4204   }
4205   verify_work_stacks_empty();
4206   verify_overflow_empty();
4207   return cumNumDirtyCards;
4208 }
4209 
4210 // NOTE: preclean_mod_union_table() above and preclean_card_table()
4211 // below are largely identical; if you need to modify
4212 // one of these methods, please check the other method too.
4213 
4214 size_t CMSCollector::preclean_card_table(ConcurrentMarkSweepGeneration* gen,
4215   ScanMarkedObjectsAgainCarefullyClosure* cl) {
4216   // strategy: it's similar to preclean_mod_union_table above, in that
4217   // we accumulate contiguous ranges of dirty cards, mark these cards
4218   // precleaned, then scan the region covered by these cards.
4219   HeapWord* endAddr   = (HeapWord*)(gen->_virtual_space.high());
4220   HeapWord* startAddr = (HeapWord*)(gen->_virtual_space.low());
4221 
4222   cl->setFreelistLock(gen->freelistLock());   // needed for yielding
4223 
4224   size_t numDirtyCards, cumNumDirtyCards;
4225   HeapWord *lastAddr, *nextAddr;
4226 
4227   for (cumNumDirtyCards = numDirtyCards = 0,
4228        nextAddr = lastAddr = startAddr;
4229        nextAddr < endAddr;
4230        nextAddr = lastAddr, cumNumDirtyCards += numDirtyCards) {
4231 
4232     ResourceMark rm;
4233     HandleMark   hm;
4234 
4235     MemRegion dirtyRegion;
4236     {
4237       // See comments in "Precleaning notes" above on why we
4238       // do this locking. XXX Could the locking overheads be
4239       // too high when dirty cards are sparse? [I don't think so.]
4240       stopTimer();
4241       CMSTokenSync x(true); // is cms thread
4242       startTimer();
4243       sample_eden();
4244       // Get and clear dirty region from card table
4245       dirtyRegion = _ct->ct_bs()->dirty_card_range_after_reset(
4246                                     MemRegion(nextAddr, endAddr),
4247                                     true,
4248                                     CardTableModRefBS::precleaned_card_val());
4249 
4250       assert(dirtyRegion.start() >= nextAddr,
4251              "returned region inconsistent?");
4252     }
4253     lastAddr = dirtyRegion.end();
4254     numDirtyCards =
4255       dirtyRegion.word_size()/CardTableModRefBS::card_size_in_words;
4256 
4257     if (!dirtyRegion.is_empty()) {
4258       stopTimer();
4259       CMSTokenSyncWithLocks ts(true, gen->freelistLock(), bitMapLock());
4260       startTimer();
4261       sample_eden();
4262       verify_work_stacks_empty();
4263       verify_overflow_empty();
4264       HeapWord* stop_point =
4265         gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
4266       if (stop_point != NULL) {
4267         assert((_collectorState == AbortablePreclean && should_abort_preclean()),
4268                "Should only be AbortablePreclean.");
4269         _ct->ct_bs()->invalidate(MemRegion(stop_point, dirtyRegion.end()));
4270         if (should_abort_preclean()) {
4271           break; // out of preclean loop
4272         } else {
4273           // Compute the next address at which preclean should pick up.
4274           lastAddr = next_card_start_after_block(stop_point);
4275         }
4276       }
4277     } else {
4278       break;
4279     }
4280   }
4281   verify_work_stacks_empty();
4282   verify_overflow_empty();
4283   return cumNumDirtyCards;
4284 }
4285 
4286 class PrecleanKlassClosure : public KlassClosure {
4287   KlassToOopClosure _cm_klass_closure;
4288  public:
4289   PrecleanKlassClosure(OopClosure* oop_closure) : _cm_klass_closure(oop_closure) {}
4290   void do_klass(Klass* k) {
4291     if (k->has_accumulated_modified_oops()) {
4292       k->clear_accumulated_modified_oops();
4293 
4294       _cm_klass_closure.do_klass(k);
4295     }
4296   }
4297 };
4298 
4299 // The freelist lock is needed to prevent asserts; is it really needed?
4300 void CMSCollector::preclean_klasses(MarkRefsIntoAndScanClosure* cl, Mutex* freelistLock) {
4301 
4302   cl->set_freelistLock(freelistLock);
4303 
4304   CMSTokenSyncWithLocks ts(true, freelistLock, bitMapLock());
4305 
4306   // SSS: Add equivalent to ScanMarkedObjectsAgainCarefullyClosure::do_yield_check and should_abort_preclean?
4307   // SSS: We should probably check if precleaning should be aborted, at suitable intervals?
4308   PrecleanKlassClosure preclean_klass_closure(cl);
4309   ClassLoaderDataGraph::classes_do(&preclean_klass_closure);
4310 
4311   verify_work_stacks_empty();
4312   verify_overflow_empty();
4313 }
4314 
4315 void CMSCollector::checkpointRootsFinal() {
4316   assert(_collectorState == FinalMarking, "incorrect state transition?");
4317   check_correct_thread_executing();
4318   // world is stopped at this checkpoint
4319   assert(SafepointSynchronize::is_at_safepoint(),
4320          "world should be stopped");
4321   TraceCMSMemoryManagerStats tms(_collectorState, GenCollectedHeap::heap()->gc_cause());
4322 
4323   verify_work_stacks_empty();
4324   verify_overflow_empty();
4325 
4326   SpecializationStats::clear();
4327   if (PrintGCDetails) {
4328     gclog_or_tty->print("[YG occupancy: "SIZE_FORMAT" K ("SIZE_FORMAT" K)]",
4329                         _young_gen->used() / K,
4330                         _young_gen->capacity() / K);
4331   }
4332   {
4333     if (CMSScavengeBeforeRemark) {
4334       GenCollectedHeap* gch = GenCollectedHeap::heap();
4335       // Temporarily set the flag to false: GCH->do_collection
4336       // expects it to be false and will set it to true
4337       FlagSetting fl(gch->_is_gc_active, false);
4338       NOT_PRODUCT(GCTraceTime t("Scavenge-Before-Remark",
4339         PrintGCDetails && Verbose, true, _gc_timer_cm, _gc_tracer_cm->gc_id());)
4340       int level = _cmsGen->level() - 1;
4341       if (level >= 0) {
4342         gch->do_collection(true,        // full (i.e. force, see below)
4343                            false,       // !clear_all_soft_refs
4344                            0,           // size
4345                            false,       // is_tlab
4346                            level        // max_level
4347                           );
4348       }
4349     }
4350     FreelistLocker x(this);
4351     MutexLockerEx y(bitMapLock(),
4352                     Mutex::_no_safepoint_check_flag);
4353     checkpointRootsFinalWork();
4354   }
4355   verify_work_stacks_empty();
4356   verify_overflow_empty();
4357   SpecializationStats::print();
4358 }
4359 
4360 void CMSCollector::checkpointRootsFinalWork() {
4361   NOT_PRODUCT(GCTraceTime tr("checkpointRootsFinalWork", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());)
4362 
4363   assert(haveFreelistLocks(), "must have free list locks");
4364   assert_lock_strong(bitMapLock());
4365 
4366   ResourceMark rm;
4367   HandleMark   hm;
4368 
4369   GenCollectedHeap* gch = GenCollectedHeap::heap();
4370 
4371   if (should_unload_classes()) {
4372     CodeCache::gc_prologue();
4373   }
4374   assert(haveFreelistLocks(), "must have free list locks");
4375   assert_lock_strong(bitMapLock());
4376 
4377   // We might assume that we need not fill TLAB's when
4378   // CMSScavengeBeforeRemark is set, because we may have just done
4379   // a scavenge which would have filled all TLAB's -- and besides
4380   // Eden would be empty. This however may not always be the case --
4381   // for instance although we asked for a scavenge, it may not have
4382   // happened because of a JNI critical section. We probably need
4383   // a policy for deciding whether we can in that case wait until
4384   // the critical section releases and then do the remark following
4385   // the scavenge, and skip it here. In the absence of that policy,
4386   // or of an indication of whether the scavenge did indeed occur,
4387   // we cannot rely on TLAB's having been filled and must do
4388   // so here just in case a scavenge did not happen.
4389   gch->ensure_parsability(false);  // fill TLAB's, but no need to retire them
4390   // Update the saved marks which may affect the root scans.
4391   gch->save_marks();
4392 
4393   if (CMSPrintEdenSurvivorChunks) {
4394     print_eden_and_survivor_chunk_arrays();
4395   }
4396 
4397   {
4398     COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
4399 
4400     // Note on the role of the mod union table:
4401     // Since the marker in "markFromRoots" marks concurrently with
4402     // mutators, it is possible for some reachable objects not to have been
4403     // scanned. For instance, the only reference to an object A may have been
4404     // placed in object B after the marker scanned B. Unless B is rescanned,
4405     // A would be collected. Such updates to references in marked objects
4406     // are detected via the mod union table, which is the set of all cards
4407     // dirtied since the first checkpoint in this GC cycle and prior to
4408     // the most recent young generation GC, minus those cleaned up by the
4409     // concurrent precleaning. (A toy model of this table follows this function.)
4410     if (CMSParallelRemarkEnabled && CollectedHeap::use_parallel_gc_threads()) {
4411       GCTraceTime t("Rescan (parallel) ", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
4412       do_remark_parallel();
4413     } else {
4414       GCTraceTime t("Rescan (non-parallel) ", PrintGCDetails, false,
4415                   _gc_timer_cm, _gc_tracer_cm->gc_id());
4416       do_remark_non_parallel();
4417     }
4418   }
4419   verify_work_stacks_empty();
4420   verify_overflow_empty();
4421 
4422   {
4423     NOT_PRODUCT(GCTraceTime ts("refProcessingWork", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());)
4424     refProcessingWork();
4425   }
4426   verify_work_stacks_empty();
4427   verify_overflow_empty();
4428 
4429   if (should_unload_classes()) {
4430     CodeCache::gc_epilogue();
4431   }
4432   JvmtiExport::gc_epilogue();
4433 
4434   // If we encountered any (marking stack / work queue) overflow
4435   // events during the current CMS cycle, take appropriate
4436   // remedial measures, where possible, so as to try and avoid
4437   // recurrence of that condition.
4438   assert(_markStack.isEmpty(), "No grey objects");
4439   size_t ser_ovflw = _ser_pmc_remark_ovflw + _ser_pmc_preclean_ovflw +
4440                      _ser_kac_ovflw        + _ser_kac_preclean_ovflw;
4441   if (ser_ovflw > 0) {
4442     if (PrintCMSStatistics != 0) {
4443       gclog_or_tty->print_cr("Marking stack overflow (benign) "
4444         "(pmc_pc="SIZE_FORMAT", pmc_rm="SIZE_FORMAT", kac="SIZE_FORMAT
4445         ", kac_preclean="SIZE_FORMAT")",
4446         _ser_pmc_preclean_ovflw, _ser_pmc_remark_ovflw,
4447         _ser_kac_ovflw, _ser_kac_preclean_ovflw);
4448     }
4449     _markStack.expand();
4450     _ser_pmc_remark_ovflw = 0;
4451     _ser_pmc_preclean_ovflw = 0;
4452     _ser_kac_preclean_ovflw = 0;
4453     _ser_kac_ovflw = 0;
4454   }
4455   if (_par_pmc_remark_ovflw > 0 || _par_kac_ovflw > 0) {
4456     if (PrintCMSStatistics != 0) {
4457       gclog_or_tty->print_cr("Work queue overflow (benign) "
4458         "(pmc_rm="SIZE_FORMAT", kac="SIZE_FORMAT")",
4459         _par_pmc_remark_ovflw, _par_kac_ovflw);
4460     }
4461     _par_pmc_remark_ovflw = 0;
4462     _par_kac_ovflw = 0;
4463   }
4464   if (PrintCMSStatistics != 0) {
4465      if (_markStack._hit_limit > 0) {
4466        gclog_or_tty->print_cr(" (benign) Hit max stack size limit ("SIZE_FORMAT")",
4467                               _markStack._hit_limit);
4468      }
4469      if (_markStack._failed_double > 0) {
4470        gclog_or_tty->print_cr(" (benign) Failed stack doubling ("SIZE_FORMAT"),"
4471                               " current capacity "SIZE_FORMAT,
4472                               _markStack._failed_double,
4473                               _markStack.capacity());
4474      }
4475   }
4476   _markStack._hit_limit = 0;
4477   _markStack._failed_double = 0;
4478 
4479   if ((VerifyAfterGC || VerifyDuringGC) &&
4480       GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
4481     verify_after_remark();
4482   }
4483 
4484   _gc_tracer_cm->report_object_count_after_gc(&_is_alive_closure);
4485 
4486   // Change under the freelistLocks.
4487   _collectorState = Sweeping;
4488   // Call isAllClear() under bitMapLock
4489   assert(_modUnionTable.isAllClear(),
4490       "Should be clear by end of the final marking");
4491   assert(_ct->klass_rem_set()->mod_union_is_clear(),
4492       "Should be clear by end of the final marking");
4493 }
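
     // Editor's illustration: a toy model of the mod union table's role
     // (see the note inside the function above). At each young GC the card
     // table's dirty bits are folded into the mod union table before the
     // cards are reset, so remark needs only (mod union) | (currently
     // dirty) cards; precleaning clears mod-union bits it has already
     // rescanned. Plain byte arrays stand in for the real bitmaps;
     // hypothetical names, guarded out of the build.
     #ifdef CMS_ILLUSTRATION_ONLY
     #include <cstddef>
     static void fold_cards_into_mod_union(unsigned char* card_table,
                                           unsigned char* mod_union,
                                           size_t n_cards,
                                           unsigned char dirty_val,
                                           unsigned char clean_val) {
       for (size_t i = 0; i < n_cards; i++) {
         if (card_table[i] == dirty_val) {
           mod_union[i]  = 1;           // remember: this card mutated mid-cycle
           card_table[i] = clean_val;   // card table starts clean for young GC
         }
       }
     }
     #endif // CMS_ILLUSTRATION_ONLY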
4494 
4495 void CMSParInitialMarkTask::work(uint worker_id) {
4496   elapsedTimer _timer;
4497   ResourceMark rm;
4498   HandleMark   hm;
4499 
4500   // ---------- scan from roots --------------
4501   _timer.start();
4502   GenCollectedHeap* gch = GenCollectedHeap::heap();
4503   Par_MarkRefsIntoClosure par_mri_cl(_collector->_span, &(_collector->_markBitMap));
4504 
4505   // ---------- young gen roots --------------
4506   {
4507     work_on_young_gen_roots(worker_id, &par_mri_cl);
4508     _timer.stop();
4509     if (PrintCMSStatistics != 0) {
4510       gclog_or_tty->print_cr(
4511         "Finished young gen initial mark scan work in %dth thread: %3.3f sec",
4512         worker_id, _timer.seconds());
4513     }
4514   }
4515 
4516   // ---------- remaining roots --------------
4517   _timer.reset();
4518   _timer.start();
4519 
4520   CLDToOopClosure cld_closure(&par_mri_cl, true);
4521 
4522   gch->gen_process_roots(_collector->_cmsGen->level(),
4523                          false,     // yg was scanned above
4524                          false,     // this is parallel code
4525                          SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
4526                          _collector->should_unload_classes(),
4527                          &par_mri_cl,
4528                          NULL,
4529                          &cld_closure);
4530   assert(_collector->should_unload_classes()
4531          || (_collector->CMSCollector::roots_scanning_options() & SharedHeap::SO_AllCodeCache),
4532          "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
4533   _timer.stop();
4534   if (PrintCMSStatistics != 0) {
4535     gclog_or_tty->print_cr(
4536       "Finished remaining root initial mark scan work in %dth thread: %3.3f sec",
4537       worker_id, _timer.seconds());
4538   }
4539 }
4540 
4541 // Parallel remark task
4542 class CMSParRemarkTask: public CMSParMarkTask {
4543   CompactibleFreeListSpace* _cms_space;
4544 
4545   // The per-thread work queues, available here for stealing.
4546   OopTaskQueueSet*       _task_queues;
4547   ParallelTaskTerminator _term;
4548 
4549  public:
4550   // A value of 0 passed to n_workers will cause the number of
4551   // workers to be taken from the active workers in the work gang.
4552   CMSParRemarkTask(CMSCollector* collector,
4553                    CompactibleFreeListSpace* cms_space,
4554                    int n_workers, FlexibleWorkGang* workers,
4555                    OopTaskQueueSet* task_queues):
4556     CMSParMarkTask("Rescan roots and grey objects in parallel",
4557                    collector, n_workers),
4558     _cms_space(cms_space),
4559     _task_queues(task_queues),
4560     _term(n_workers, task_queues) { }
4561 
4562   OopTaskQueueSet* task_queues() { return _task_queues; }
4563 
4564   OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
4565 
4566   ParallelTaskTerminator* terminator() { return &_term; }
4567   int n_workers() { return _n_workers; }
4568 
4569   void work(uint worker_id);
4570 
4571  private:
4572   // ... of dirty cards in old space
4573   void do_dirty_card_rescan_tasks(CompactibleFreeListSpace* sp, int i,
4574                                   Par_MarkRefsIntoAndScanClosure* cl);
4575 
4576   // ... work stealing for the above
4577   void do_work_steal(int i, Par_MarkRefsIntoAndScanClosure* cl, int* seed);
4578 };
4579 
4580 class RemarkKlassClosure : public KlassClosure {
4581   KlassToOopClosure _cm_klass_closure;
4582  public:
4583   RemarkKlassClosure(OopClosure* oop_closure) : _cm_klass_closure(oop_closure) {}
4584   void do_klass(Klass* k) {
4585     // Check if we have modified any oops in the Klass during the concurrent marking.
4586     if (k->has_accumulated_modified_oops()) {
4587       k->clear_accumulated_modified_oops();
4588 
4589     // We could have transferred the current modified marks to the accumulated marks,
4590       // like we do with the Card Table to Mod Union Table. But it's not really necessary.
4591     } else if (k->has_modified_oops()) {
4592       // Don't clear anything, this info is needed by the next young collection.
4593     } else {
4594       // No modified oops in the Klass.
4595       return;
4596     }
4597 
4598     // The klass has modified fields, need to scan the klass.
4599     _cm_klass_closure.do_klass(k);
4600   }
4601 };
4602 
4603 void CMSParMarkTask::work_on_young_gen_roots(uint worker_id, OopsInGenClosure* cl) {
4604   ParNewGeneration* young_gen = _collector->_young_gen;
4605   ContiguousSpace* eden_space = young_gen->eden();
4606   ContiguousSpace* from_space = young_gen->from();
4607   ContiguousSpace* to_space   = young_gen->to();
4608 
4609   HeapWord** eca = _collector->_eden_chunk_array;
4610   size_t     ect = _collector->_eden_chunk_index;
4611   HeapWord** sca = _collector->_survivor_chunk_array;
4612   size_t     sct = _collector->_survivor_chunk_index;
4613 
4614   assert(ect <= _collector->_eden_chunk_capacity, "out of bounds");
4615   assert(sct <= _collector->_survivor_chunk_capacity, "out of bounds");
4616 
4617   do_young_space_rescan(worker_id, cl, to_space, NULL, 0);
4618   do_young_space_rescan(worker_id, cl, from_space, sca, sct);
4619   do_young_space_rescan(worker_id, cl, eden_space, eca, ect);
4620 }
4621 
4622 // work_queue(i) is passed to the closure
4623 // Par_MarkRefsIntoAndScanClosure.  The "i" parameter
4624 // is also passed to do_dirty_card_rescan_tasks() and to
4625 // do_work_steal() to select the i-th task_queue.
4626 
4627 void CMSParRemarkTask::work(uint worker_id) {
4628   elapsedTimer _timer;
4629   ResourceMark rm;
4630   HandleMark   hm;
4631 
4632   // ---------- rescan from roots --------------
4633   _timer.start();
4634   GenCollectedHeap* gch = GenCollectedHeap::heap();
4635   Par_MarkRefsIntoAndScanClosure par_mrias_cl(_collector,
4636     _collector->_span, _collector->ref_processor(),
4637     &(_collector->_markBitMap),
4638     work_queue(worker_id));
4639 
4640   // Rescan young gen roots first since these are likely
4641   // coarsely partitioned and may, on that account, constitute
4642   // the critical path; thus, it's best to start off that
4643   // work first.
4644   // ---------- young gen roots --------------
4645   {
4646     work_on_young_gen_roots(worker_id, &par_mrias_cl);
4647     _timer.stop();
4648     if (PrintCMSStatistics != 0) {
4649       gclog_or_tty->print_cr(
4650         "Finished young gen rescan work in %dth thread: %3.3f sec",
4651         worker_id, _timer.seconds());
4652     }
4653   }
4654 
4655   // ---------- remaining roots --------------
4656   _timer.reset();
4657   _timer.start();
4658   gch->gen_process_roots(_collector->_cmsGen->level(),
4659                          false,     // yg was scanned above
4660                          false,     // this is parallel code
4661                          SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
4662                          _collector->should_unload_classes(),
4663                          &par_mrias_cl,
4664                          NULL,
4665                          NULL);     // The dirty klasses will be handled below
4666 
4667   assert(_collector->should_unload_classes()
4668          || (_collector->CMSCollector::roots_scanning_options() & SharedHeap::SO_AllCodeCache),
4669          "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
4670   _timer.stop();
4671   if (PrintCMSStatistics != 0) {
4672     gclog_or_tty->print_cr(
4673       "Finished remaining root rescan work in %dth thread: %3.3f sec",
4674       worker_id, _timer.seconds());
4675   }
4676 
4677   // ---------- unhandled CLD scanning ----------
4678   if (worker_id == 0) { // Single threaded at the moment.
4679     _timer.reset();
4680     _timer.start();
4681 
4682     // Scan all new class loader data objects and new dependencies that were
4683     // introduced during concurrent marking.
4684     ResourceMark rm;
4685     GrowableArray<ClassLoaderData*>* array = ClassLoaderDataGraph::new_clds();
4686     for (int i = 0; i < array->length(); i++) {
4687       par_mrias_cl.do_class_loader_data(array->at(i));
4688     }
4689 
4690     // We don't need to keep track of new CLDs anymore.
4691     ClassLoaderDataGraph::remember_new_clds(false);
4692 
4693     _timer.stop();
4694     if (PrintCMSStatistics != 0) {
4695       gclog_or_tty->print_cr(
4696           "Finished unhandled CLD scanning work in %dth thread: %3.3f sec",
4697           worker_id, _timer.seconds());
4698     }
4699   }
4700 
4701   // ---------- dirty klass scanning ----------
4702   if (worker_id == 0) { // Single threaded at the moment.
4703     _timer.reset();
4704     _timer.start();
4705 
4706     // Scan all classes that were dirtied during the concurrent marking phase.
4707     RemarkKlassClosure remark_klass_closure(&par_mrias_cl);
4708     ClassLoaderDataGraph::classes_do(&remark_klass_closure);
4709 
4710     _timer.stop();
4711     if (PrintCMSStatistics != 0) {
4712       gclog_or_tty->print_cr(
4713           "Finished dirty klass scanning work in %dth thread: %3.3f sec",
4714           worker_id, _timer.seconds());
4715     }
4716   }
4717 
4718   // We might have added oops to ClassLoaderData::_handles during the
4719   // concurrent marking phase. These oops point to newly allocated objects
4720   // that are guaranteed to be kept alive. Either by the direct allocation
4721   // code, or when the young collector processes the roots. Hence,
4722   // we don't have to revisit the _handles block during the remark phase.
4723 
4724   // ---------- rescan dirty cards ------------
4725   _timer.reset();
4726   _timer.start();
4727 
4728   // Do the rescan tasks for each of the two spaces
4729   // (cms_space) in turn.
4730   // "worker_id" is passed to select the task_queue for "worker_id"
4731   do_dirty_card_rescan_tasks(_cms_space, worker_id, &par_mrias_cl);
4732   _timer.stop();
4733   if (PrintCMSStatistics != 0) {
4734     gclog_or_tty->print_cr(
4735       "Finished dirty card rescan work in %dth thread: %3.3f sec",
4736       worker_id, _timer.seconds());
4737   }
4738 
4739   // ---------- steal work from other threads ...
4740   // ---------- ... and drain overflow list.
4741   _timer.reset();
4742   _timer.start();
4743   do_work_steal(worker_id, &par_mrias_cl, _collector->hash_seed(worker_id));
4744   _timer.stop();
4745   if (PrintCMSStatistics != 0) {
4746     gclog_or_tty->print_cr(
4747       "Finished work stealing in %dth thread: %3.3f sec",
4748       worker_id, _timer.seconds());
4749   }
4750 }
4751 
4752 // Note that the "worker_id" parameter is not used.
4753 void
4754 CMSParMarkTask::do_young_space_rescan(uint worker_id,
4755   OopsInGenClosure* cl, ContiguousSpace* space,
4756   HeapWord** chunk_array, size_t chunk_top) {
4757   // Until all tasks completed:
4758   // . claim an unclaimed task
4759   // . compute region boundaries corresponding to task claimed
4760   //   using chunk_array
4761   // . par_oop_iterate(cl) over that region
4762 
4763   ResourceMark rm;
4764   HandleMark   hm;
4765 
4766   SequentialSubTasksDone* pst = space->par_seq_tasks();
4767 
4768   uint nth_task = 0;
4769   uint n_tasks  = pst->n_tasks();
4770 
4771   if (n_tasks > 0) {
4772     assert(pst->valid(), "Uninitialized use?");
4773     HeapWord *start, *end;
4774     while (!pst->is_task_claimed(/* reference */ nth_task)) {
4775       // We claimed task # nth_task; compute its boundaries.
4776       if (chunk_top == 0) {  // no samples were taken
4777         assert(nth_task == 0 && n_tasks == 1, "Can have only 1 eden task");
4778         start = space->bottom();
4779         end   = space->top();
4780       } else if (nth_task == 0) {
4781         start = space->bottom();
4782         end   = chunk_array[nth_task];
4783       } else if (nth_task < (uint)chunk_top) {
4784         assert(nth_task >= 1, "Control point invariant");
4785         start = chunk_array[nth_task - 1];
4786         end   = chunk_array[nth_task];
4787       } else {
4788         assert(nth_task == (uint)chunk_top, "Control point invariant");
4789         start = chunk_array[chunk_top - 1];
4790         end   = space->top();
4791       }
4792       MemRegion mr(start, end);
4793       // Verify that mr is in space
4794       assert(mr.is_empty() || space->used_region().contains(mr),
4795              "Should be in space");
4796       // Verify that "start" is an object boundary
4797       assert(mr.is_empty() || oop(mr.start())->is_oop(),
4798              "Should be an oop");
4799       space->par_oop_iterate(mr, cl);
4800     }
4801     pst->all_tasks_completed();
4802   }
4803 }
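
     // Editor's illustration: the boundary selection above, isolated. Task
     // k of a space sampled at chunk_top boundaries scans [start, end),
     // where the sample array supplies the interior cut points and
     // bottom/top supply the extremes. Hypothetical names; guarded out of
     // the build.
     #ifdef CMS_ILLUSTRATION_ONLY
     #include <cstddef>
     static void task_bounds(char* bottom, char* top,
                             char* const* samples, size_t n_samples,
                             size_t k,   // claimed task number, k <= n_samples
                             char** start, char** end) {
       if (n_samples == 0) {             // no samples: one task, whole space
         *start = bottom;          *end = top;
       } else if (k == 0) {              // first chunk starts at bottom
         *start = bottom;          *end = samples[0];
       } else if (k < n_samples) {       // interior chunk between two samples
         *start = samples[k - 1]; *end = samples[k];
       } else {                          // k == n_samples: last chunk to top
         *start = samples[n_samples - 1]; *end = top;
       }
     }
     #endif // CMS_ILLUSTRATION_ONLY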
4804 
4805 void
4806 CMSParRemarkTask::do_dirty_card_rescan_tasks(
4807   CompactibleFreeListSpace* sp, int i,
4808   Par_MarkRefsIntoAndScanClosure* cl) {
4809   // Until all tasks completed:
4810   // . claim an unclaimed task
4811   // . compute region boundaries corresponding to task claimed
4812   // . transfer dirty bits ct->mut for that region
4813   // . apply rescanclosure to dirty mut bits for that region
4814 
4815   ResourceMark rm;
4816   HandleMark   hm;
4817 
4818   OopTaskQueue* work_q = work_queue(i);
4819   ModUnionClosure modUnionClosure(&(_collector->_modUnionTable));
4820   // CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION!
4821   // CAUTION: This closure has state that persists across calls to
4822   // the work method dirty_range_iterate_clear() in that it has
4823   // embedded in it a (subtype of) UpwardsObjectClosure. The
4824   // use of that state in the embedded UpwardsObjectClosure instance
4825   // assumes that the cards are always iterated (even if in parallel
4826   // by several threads) in monotonically increasing order by each
4827   // thread. This is true of the implementation below which picks
4828   // card ranges (chunks) in monotonically increasing order globally
4829   // and, a-fortiori, in monotonically increasing order per thread
4830   // (the latter order being a subsequence of the former).
4831   // If the work code below is ever reorganized into a more chaotic
4832   // work-partitioning form than the current "sequential tasks"
4833   // paradigm, the use of that persistent state will have to be
4834   // revisited and modified appropriately. See also related
4835   // bug 4756801 work on which should examine this code to make
4836   // sure that the changes there do not run counter to the
4837   // assumptions made here and necessary for correctness and
4838   // efficiency. Note also that this code might yield inefficient
4839   // behavior in the case of very large objects that span one or
4840   // more work chunks. Such objects would potentially be scanned
4841   // several times redundantly. Work on 4756801 should try and
4842   // address that performance anomaly if at all possible. XXX
4843   MemRegion  full_span  = _collector->_span;
4844   CMSBitMap* bm    = &(_collector->_markBitMap);     // shared
4845   MarkFromDirtyCardsClosure
4846     greyRescanClosure(_collector, full_span, // entire span of interest
4847                       sp, bm, work_q, cl);
4848 
4849   SequentialSubTasksDone* pst = sp->conc_par_seq_tasks();
4850   assert(pst->valid(), "Uninitialized use?");
4851   uint nth_task = 0;
4852   const int alignment = CardTableModRefBS::card_size * BitsPerWord;
4853   MemRegion span = sp->used_region();
4854   HeapWord* start_addr = span.start();
4855   HeapWord* end_addr = (HeapWord*)round_to((intptr_t)span.end(),
4856                                            alignment);
4857   const size_t chunk_size = sp->rescan_task_size(); // in HeapWord units
4858   assert((HeapWord*)round_to((intptr_t)start_addr, alignment) ==
4859          start_addr, "Check alignment");
4860   assert((size_t)round_to((intptr_t)chunk_size, alignment) ==
4861          chunk_size, "Check alignment");
4862 
4863   while (!pst->is_task_claimed(/* reference */ nth_task)) {
4864     // Having claimed the nth_task, compute corresponding mem-region,
4865     // which is a-fortiori aligned correctly (i.e. at a MUT boundary).
4866     // The alignment restriction ensures that we do not need any
4867     // synchronization with other gang-workers while setting or
4868     // clearing bits in this chunk of the MUT.
4869     MemRegion this_span = MemRegion(start_addr + nth_task*chunk_size,
4870                                     start_addr + (nth_task+1)*chunk_size);
4871     // The last chunk's end might be way beyond end of the
4872     // used region. In that case pull back appropriately.
4873     if (this_span.end() > end_addr) {
4874       this_span.set_end(end_addr);
4875       assert(!this_span.is_empty(), "Program logic (calculation of n_tasks)");
4876     }
4877     // Iterate over the dirty cards covering this chunk, marking them
4878     // precleaned, and setting the corresponding bits in the mod union
4879     // table. Since we have been careful to partition at Card and MUT-word
4880     // boundaries no synchronization is needed between parallel threads.
4881     _collector->_ct->ct_bs()->dirty_card_iterate(this_span,
4882                                                  &modUnionClosure);
4883 
4884     // Having transferred these marks into the modUnionTable,
4885     // rescan the marked objects on the dirty cards in the modUnionTable.
4886     // Even if this is at a synchronous collection, the initial marking
4887     // may have been done during an asynchronous collection so there
4888     // may be dirty bits in the mod-union table.
4889     _collector->_modUnionTable.dirty_range_iterate_clear(
4890                   this_span, &greyRescanClosure);
4891     _collector->_modUnionTable.verifyNoOneBitsInRange(
4892                                  this_span.start(),
4893                                  this_span.end());
4894   }
4895   pst->all_tasks_completed();  // declare that i am done
4896 }
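
     // Editor's illustration: the chunk geometry above. Chunks are aligned
     // to card_size * BitsPerWord bytes so that each word of the mod union
     // bitmap is owned by exactly one task, which is why the workers can
     // set and clear bits without synchronization. Hypothetical names;
     // guarded out of the build.
     #ifdef CMS_ILLUSTRATION_ONLY
     #include <cstddef>
     static void claimed_chunk(char* start_addr, char* end_addr,
                               size_t chunk_size,  // multiple of the alignment
                               size_t k,           // claimed task number
                               char** lo, char** hi) {
       *lo = start_addr + k * chunk_size;
       *hi = start_addr + (k + 1) * chunk_size;
       if (*hi > end_addr) {
         *hi = end_addr;                 // last chunk pulled back to the end
       }
     }
     #endif // CMS_ILLUSTRATION_ONLY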
4897 
4898 // . see if we can share work_queues with ParNew? XXX
4899 void
4900 CMSParRemarkTask::do_work_steal(int i, Par_MarkRefsIntoAndScanClosure* cl,
4901                                 int* seed) {
4902   OopTaskQueue* work_q = work_queue(i);
4903   NOT_PRODUCT(int num_steals = 0;)
4904   oop obj_to_scan;
4905   CMSBitMap* bm = &(_collector->_markBitMap);
4906 
4907   while (true) {
4908     // Completely finish any left over work from (an) earlier round(s)
4909     cl->trim_queue(0);
4910     size_t num_from_overflow_list = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
4911                                          (size_t)ParGCDesiredObjsFromOverflowList);
4912     // Now check if there's any work in the overflow list
4913     // Passing ParallelGCThreads as the third parameter, no_of_gc_threads,
4914     // only affects the number of attempts made to get work from the
4915     // overflow list and does not affect the number of workers.  Just
4916     // pass ParallelGCThreads so this behavior is unchanged.
4917     if (_collector->par_take_from_overflow_list(num_from_overflow_list,
4918                                                 work_q,
4919                                                 ParallelGCThreads)) {
4920       // found something in global overflow list;
4921       // not yet ready to go stealing work from others.
4922       // We'd like to assert(work_q->size() != 0, ...)
4923       // because we just took work from the overflow list,
4924       // but of course we can't since all of that could have
4925       // been already stolen from us.
4926       // "He giveth and He taketh away."
4927       continue;
4928     }
4929     // Verify that we have no work before we resort to stealing
4930     assert(work_q->size() == 0, "Have work, shouldn't steal");
4931     // Try to steal from other queues that have work
4932     if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
4933       NOT_PRODUCT(num_steals++;)
4934       assert(obj_to_scan->is_oop(), "Oops, not an oop!");
4935       assert(bm->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?");
4936       // Do scanning work
4937       obj_to_scan->oop_iterate(cl);
4938       // Loop around, finish this work, and try to steal some more
4939     } else if (terminator()->offer_termination()) {
4940         break;  // nirvana from the infinite cycle
4941     }
4942   }
4943   NOT_PRODUCT(
4944     if (PrintCMSStatistics != 0) {
4945       gclog_or_tty->print("\n\t(%d: stole %d oops)", i, num_steals);
4946     }
4947   )
4948   assert(work_q->size() == 0 && _collector->overflow_list_is_empty(),
4949          "Else our work is not yet done");
4950 }
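
     // Editor's illustration: the stealing protocol above as a skeleton.
     // A worker always drains its local queue first, prefers the shared
     // overflow list to stealing, and offers termination only when it can
     // find no work anywhere; termination succeeds once all workers agree.
     // Hypothetical hooks; guarded out of the build.
     #ifdef CMS_ILLUSTRATION_ONLY
     static void drain_local_queue();         // scan everything queued locally
     static bool refill_from_overflow_list(); // global list; may lose races
     static bool steal_from_random_victim();  // true if an oop was taken
     static bool offer_termination();         // consensus protocol
     static void work_steal_loop() {
       for (;;) {
         drain_local_queue();                 // finish earlier rounds completely
         if (refill_from_overflow_list()) continue;  // global work first
         if (steal_from_random_victim())  continue;  // then a peer's queue
         if (offer_termination())         break;     // everyone is done
       }
     }
     #endif // CMS_ILLUSTRATION_ONLY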
4951 
4952 // Record object boundaries in _eden_chunk_array by sampling the eden
4953 // top in the slow-path eden object allocation code path, if
4954 // CMSEdenChunksRecordAlways is true. If CMSEdenChunksRecordAlways
4955 // is false, we rely instead on the asynchronous sampling in
4956 // sample_eden(), which is active during part of the
4957 // preclean phase.
4958 void CMSCollector::sample_eden_chunk() {
4959   if (CMSEdenChunksRecordAlways && _eden_chunk_array != NULL) {
4960     if (_eden_chunk_lock->try_lock()) {
4961       // Record a sample. This is the critical section. The contents
4962       // of the _eden_chunk_array have to be non-decreasing in the
4963       // address order.
4964       _eden_chunk_array[_eden_chunk_index] = *_top_addr;
4965       assert(_eden_chunk_array[_eden_chunk_index] <= *_end_addr,
4966              "Unexpected state of Eden");
4967       if (_eden_chunk_index == 0 ||
4968           ((_eden_chunk_array[_eden_chunk_index] > _eden_chunk_array[_eden_chunk_index-1]) &&
4969            (pointer_delta(_eden_chunk_array[_eden_chunk_index],
4970                           _eden_chunk_array[_eden_chunk_index-1]) >= CMSSamplingGrain))) {
4971         _eden_chunk_index++;  // commit sample
4972       }
4973       _eden_chunk_lock->unlock();
4974     }
4975   }
4976 }
4977 
4978 // Return a thread-local PLAB recording array, as appropriate.
4979 void* CMSCollector::get_data_recorder(int thr_num) {
4980   if (_survivor_plab_array != NULL &&
4981       (CMSPLABRecordAlways ||
4982        (_collectorState > Marking && _collectorState < FinalMarking))) {
4983     assert(thr_num < (int)ParallelGCThreads, "thr_num is out of bounds");
4984     ChunkArray* ca = &_survivor_plab_array[thr_num];
4985     ca->reset();   // clear it so that fresh data is recorded
4986     return (void*) ca;
4987   } else {
4988     return NULL;
4989   }
4990 }
4991 
4992 // Reset all the thread-local PLAB recording arrays
4993 void CMSCollector::reset_survivor_plab_arrays() {
4994   for (uint i = 0; i < ParallelGCThreads; i++) {
4995     _survivor_plab_array[i].reset();
4996   }
4997 }
4998 
4999 // Merge the per-thread plab arrays into the global survivor chunk
5000 // array which will provide the partitioning of the survivor space
5001 // for CMS initial scan and rescan.
5002 void CMSCollector::merge_survivor_plab_arrays(ContiguousSpace* surv,
5003                                               int no_of_gc_threads) {
5004   assert(_survivor_plab_array  != NULL, "Error");
5005   assert(_survivor_chunk_array != NULL, "Error");
5006   assert(_collectorState == FinalMarking ||
5007          (CMSParallelInitialMarkEnabled && _collectorState == InitialMarking), "Error");
5008   for (int j = 0; j < no_of_gc_threads; j++) {
5009     _cursor[j] = 0;
5010   }
5011   HeapWord* top = surv->top();
5012   size_t i;
5013   for (i = 0; i < _survivor_chunk_capacity; i++) {  // all sca entries
5014     HeapWord* min_val = top;          // Higher than any PLAB address
5015     uint      min_tid = 0;            // position of min_val this round
5016     for (int j = 0; j < no_of_gc_threads; j++) {
5017       ChunkArray* cur_sca = &_survivor_plab_array[j];
5018       if (_cursor[j] == cur_sca->end()) {
5019         continue;
5020       }
5021       assert(_cursor[j] < cur_sca->end(), "ctl pt invariant");
5022       HeapWord* cur_val = cur_sca->nth(_cursor[j]);
5023       assert(surv->used_region().contains(cur_val), "Out of bounds value");
5024       if (cur_val < min_val) {
5025         min_tid = j;
5026         min_val = cur_val;
5027       } else {
5028         assert(cur_val < top, "All recorded addresses should be less");
5029       }
5030     }
5031     // At this point min_val and min_tid are respectively
5032     // the least address in _survivor_plab_array[j]->nth(_cursor[j])
5033     // and the thread (j) that witnesses that address.
5034     // We record this address in the _survivor_chunk_array[i]
5035     // and increment _cursor[min_tid] prior to the next round i.
5036     if (min_val == top) {
5037       break;
5038     }
5039     _survivor_chunk_array[i] = min_val;
5040     _cursor[min_tid]++;
5041   }
5042   // We are all done; record the size of the _survivor_chunk_array
5043   _survivor_chunk_index = i; // exclusive: [0, i)
5044   if (PrintCMSStatistics > 0) {
5045     gclog_or_tty->print(" (Survivor:" SIZE_FORMAT " chunks) ", i);
5046   }
5047   // Verify that we used up all the recorded entries
5048   #ifdef ASSERT
5049     size_t total = 0;
5050     for (int j = 0; j < no_of_gc_threads; j++) {
5051       assert(_cursor[j] == _survivor_plab_array[j].end(), "Ctl pt invariant");
5052       total += _cursor[j];
5053     }
5054     assert(total == _survivor_chunk_index, "Ctl Pt Invariant");
5055     // Check that the merged array is in sorted order
5056     if (total > 0) {
5057       for (size_t i = 0; i < total - 1; i++) {
5058         if (PrintCMSStatistics > 0) {
5059           gclog_or_tty->print(" (chunk" SIZE_FORMAT ":" INTPTR_FORMAT ") ",
5060                               i, _survivor_chunk_array[i]);
5061         }
5062         assert(_survivor_chunk_array[i] < _survivor_chunk_array[i+1],
5063                "Not sorted");
5064       }
5065     }
5066   #endif // ASSERT
5067 }
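
     // Editor's illustration: the merge above is a textbook k-way merge.
     // Each per-thread PLAB array is already sorted, and each round appends
     // the smallest unconsumed address across all threads to the output.
     // Hypothetical names and plain pointers; guarded out of the build.
     #ifdef CMS_ILLUSTRATION_ONLY
     #include <cstddef>
     static size_t k_way_merge(char* const* const* arrays,  // k sorted inputs
                               const size_t* lengths, int k,
                               char** out, size_t out_capacity,
                               char* sentinel) {  // above every valid address
       size_t* cursor = new size_t[k]();          // zero-initialized cursors
       size_t  n = 0;
       while (n < out_capacity) {
         char* min_val = sentinel;
         int   min_tid = -1;
         for (int j = 0; j < k; j++) {            // find least unconsumed entry
           if (cursor[j] < lengths[j] && arrays[j][cursor[j]] < min_val) {
             min_val = arrays[j][cursor[j]];
             min_tid = j;
           }
         }
         if (min_tid < 0) break;                  // every input exhausted
         out[n++] = min_val;                      // commit, advance that input
         cursor[min_tid]++;
       }
       delete[] cursor;
       return n;                                  // exclusive count, as above
     }
     #endif // CMS_ILLUSTRATION_ONLY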
5068 
5069 // Set up the space's par_seq_tasks structure for work claiming
5070 // for parallel initial scan and rescan of young gen.
5071 // See CMSParRemarkTask, where this is currently used.
5072 void
5073 CMSCollector::
5074 initialize_sequential_subtasks_for_young_gen_rescan(int n_threads) {
5075   assert(n_threads > 0, "Unexpected n_threads argument");
5076 
5077   // Eden space
5078   if (!_young_gen->eden()->is_empty()) {
5079     SequentialSubTasksDone* pst = _young_gen->eden()->par_seq_tasks();
5080     assert(!pst->valid(), "Clobbering existing data?");
5081     // Each valid entry in [0, _eden_chunk_index) represents a task.
5082     size_t n_tasks = _eden_chunk_index + 1;
5083     assert(n_tasks == 1 || _eden_chunk_array != NULL, "Error");
5084     // Sets the condition for completion of the subtask (how many threads
5085     // need to finish in order to be done).
5086     pst->set_n_threads(n_threads);
5087     pst->set_n_tasks((int)n_tasks);
5088   }
5089 
5090   // Merge the survivor plab arrays into _survivor_chunk_array
5091   if (_survivor_plab_array != NULL) {
5092     merge_survivor_plab_arrays(_young_gen->from(), n_threads);
5093   } else {
5094     assert(_survivor_chunk_index == 0, "Error");
5095   }
5096 
5097   // To space
5098   {
5099     SequentialSubTasksDone* pst = _young_gen->to()->par_seq_tasks();
5100     assert(!pst->valid(), "Clobbering existing data?");
5101     // Sets the condition for completion of the subtask (how many threads
5102     // need to finish in order to be done).
5103     pst->set_n_threads(n_threads);
5104     pst->set_n_tasks(1);
5105     assert(pst->valid(), "Error");
5106   }
5107 
5108   // From space
5109   {
5110     SequentialSubTasksDone* pst = _young_gen->from()->par_seq_tasks();
5111     assert(!pst->valid(), "Clobbering existing data?");
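         // As with eden above, the recorded boundaries partition from-space
         // into _survivor_chunk_index + 1 chunks.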
5112     size_t n_tasks = _survivor_chunk_index + 1;
5113     assert(n_tasks == 1 || _survivor_chunk_array != NULL, "Error");
5114     // Sets the condition for completion of the subtask (how many threads
5115     // need to finish in order to be done).
5116     pst->set_n_threads(n_threads);
5117     pst->set_n_tasks((int)n_tasks);
5118     assert(pst->valid(), "Error");
5119   }
5120 }
5121 
5122 // Parallel version of remark
5123 void CMSCollector::do_remark_parallel() {
5124   GenCollectedHeap* gch = GenCollectedHeap::heap();
5125   FlexibleWorkGang* workers = gch->workers();
5126   assert(workers != NULL, "Need parallel worker threads.");
5127   // Choose to use the number of GC workers most recently set
5128   // into "active_workers".  If active_workers is not set, set it
5129   // to ParallelGCThreads.
5130   int n_workers = workers->active_workers();
5131   if (n_workers == 0) {
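           // In debug builds the assert below fires (active_workers should
           // have been set during the preceding scavenge); product builds
           // fall back to ParallelGCThreads.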
5132     assert(n_workers > 0, "Should have been set during scavenge");
5133     n_workers = ParallelGCThreads;
5134     workers->set_active_workers(n_workers);
5135   }
5136   CompactibleFreeListSpace* cms_space  = _cmsGen->cmsSpace();
5137 
5138   CMSParRemarkTask tsk(this,
5139     cms_space,
5140     n_workers, workers, task_queues());
5141 
5142   // Set up for parallel process_roots work.
5143   gch->set_par_threads(n_workers);
5144   // We won't be iterating over the cards in the card table updating
5145   // the younger_gen cards, so we shouldn't call the following; otherwise
5146   // the verification code, as well as subsequent younger_refs_iterate
5147   // code, would get confused. XXX
5148   // gch->rem_set()->prepare_for_younger_refs_iterate(true); // parallel
5149 
5150   // The young gen rescan work will not be done as part of
5151   // process_roots (which currently doesn't know how to
5152   // parallelize such a scan), but rather will be broken up into
5153   // a set of parallel tasks (via the sampling that the [abortable]
5154   // preclean phase did of eden), plus the [two] tasks of
5155   // scanning the [two] survivor spaces. Further fine-grained
5156   // parallelization of the scanning of the survivor spaces
5157   // themselves, and of precleaning of the younger gen itself
5158   // is deferred to the future.
5159   initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
5160 
5161   // The dirty card rescan work is broken up into a "sequence"
5162   // of parallel tasks (per constituent space) that are dynamically
5163   // claimed by the parallel threads.
5164   cms_space->initialize_sequential_subtasks_for_rescan(n_workers);
5165 
5166   // It turns out that even when we're using 1 thread, doing the work in a
5167   // separate thread causes wide variance in run times.  We can't help this
5168   // in the multi-threaded case, but we special-case n=1 here to get
5169   // repeatable measurements of the 1-thread overhead of the parallel code.
5170   if (n_workers > 1) {
5171     // Make refs discovery MT-safe, if it isn't already: it may not
5172     // necessarily be so, since it's possible that we are doing
5173     // ST marking.
5174     ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), true);
5175     GenCollectedHeap::StrongRootsScope srs(gch);
5176     workers->run_task(&tsk);
5177   } else {
5178     ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false);
5179     GenCollectedHeap::StrongRootsScope srs(gch);
5180     tsk.work(0);
5181   }
5182 
5183   gch->set_par_threads(0);  // 0 ==> non-parallel.
5184   // restore, single-threaded for now, any preserved marks
5185   // as a result of work_q overflow
5186   restore_preserved_marks_if_any();
5187 }
5188 
5189 // Non-parallel version of remark
5190 void CMSCollector::do_remark_non_parallel() {
5191   ResourceMark rm;
5192   HandleMark   hm;
5193   GenCollectedHeap* gch = GenCollectedHeap::heap();
5194   ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false);
5195 
5196   MarkRefsIntoAndScanClosure
5197     mrias_cl(_span, ref_processor(), &_markBitMap, NULL /* not precleaning */,
5198              &_markStack, this,
5199              false /* should_yield */, false /* not precleaning */);
5200   MarkFromDirtyCardsClosure
5201     markFromDirtyCardsClosure(this, _span,
5202                               NULL,  // space is set further below
5203                               &_markBitMap, &_markStack, &mrias_cl);
5204   {
5205     GCTraceTime t("grey object rescan", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
5206     // Iterate over the dirty cards, setting the corresponding bits in the
5207     // mod union table.
5208     {
5209       ModUnionClosure modUnionClosure(&_modUnionTable);
5210       _ct->ct_bs()->dirty_card_iterate(
5211                       _cmsGen->used_region(),
5212                       &modUnionClosure);
5213     }
5214     // Having transferred these marks into the modUnionTable, we just need
5215     // to rescan the marked objects on the dirty cards in the modUnionTable.
5216     // The initial marking may have been done during an asynchronous
5217     // collection so there may be dirty bits in the mod-union table.
5218     const int alignment =
5219       CardTableModRefBS::card_size * BitsPerWord;
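         // (presumably so that the mod-union table ranges passed to
         // dirty_range_iterate_clear below end on bitmap-word boundaries)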
5220     {
5221       // ... First handle dirty cards in CMS gen
5222       markFromDirtyCardsClosure.set_space(_cmsGen->cmsSpace());
5223       MemRegion ur = _cmsGen->used_region();
5224       HeapWord* lb = ur.start();
5225       HeapWord* ub = (HeapWord*)round_to((intptr_t)ur.end(), alignment);
5226       MemRegion cms_span(lb, ub);
5227       _modUnionTable.dirty_range_iterate_clear(cms_span,
5228                                                &markFromDirtyCardsClosure);
5229       verify_work_stacks_empty();
5230       if (PrintCMSStatistics != 0) {
5231         gclog_or_tty->print(" (re-scanned " SIZE_FORMAT " dirty cards in cms gen) ",
5232           markFromDirtyCardsClosure.num_dirty_cards());
5233       }
5234     }
5235   }
5236   if (VerifyDuringGC &&
5237       GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
5238     HandleMark hm;  // Discard invalid handles created during verification
5239     Universe::verify();
5240   }
5241   {
5242     GCTraceTime t("root rescan", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
5243 
5244     verify_work_stacks_empty();
5245 
5246     gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
5247     GenCollectedHeap::StrongRootsScope srs(gch);
5248 
5249     gch->gen_process_roots(_cmsGen->level(),
5250                            true,  // younger gens as roots
5251                            false, // use the local StrongRootsScope
5252                            SharedHeap::ScanningOption(roots_scanning_options()),
5253                            should_unload_classes(),
5254                            &mrias_cl,
5255                            NULL,
5256                            NULL); // The dirty klasses will be handled below
5257 
5258     assert(should_unload_classes()
5259            || (roots_scanning_options() & SharedHeap::SO_AllCodeCache),
5260            "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
5261   }
5262 
5263   {
5264     GCTraceTime t("visit unhandled CLDs", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
5265 
5266     verify_work_stacks_empty();
5267 
5268     // Scan all class loader data objects that might have been introduced
5269     // during concurrent marking.
5270     ResourceMark rm;
5271     GrowableArray<ClassLoaderData*>* array = ClassLoaderDataGraph::new_clds();
5272     for (int i = 0; i < array->length(); i++) {
5273       mrias_cl.do_class_loader_data(array->at(i));
5274     }
5275 
5276     // We don't need to keep track of new CLDs anymore.
5277     ClassLoaderDataGraph::remember_new_clds(false);
5278 
5279     verify_work_stacks_empty();
5280   }
5281 
5282   {
5283     GCTraceTime t("dirty klass scan", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
5284 
5285     verify_work_stacks_empty();
5286 
5287     RemarkKlassClosure remark_klass_closure(&mrias_cl);
5288     ClassLoaderDataGraph::classes_do(&remark_klass_closure);
5289 
5290     verify_work_stacks_empty();
5291   }
5292 
5293   // We might have added oops to ClassLoaderData::_handles during the
5294   // concurrent marking phase. These oops point to newly allocated objects
5295   // that are guaranteed to be kept alive. Either by the direct allocation
5296   // code, or when the young collector processes the roots. Hence,
5297   // we don't have to revisit the _handles block during the remark phase.
5298 
5299   verify_work_stacks_empty();
5300   // Restore evacuated mark words, if any, used for overflow list links
5301   if (!CMSOverflowEarlyRestoration) {
5302     restore_preserved_marks_if_any();
5303   }
5304   verify_overflow_empty();
5305 }
5306 
5307 ////////////////////////////////////////////////////////
5308 // Parallel Reference Processing Task Proxy Class
5309 ////////////////////////////////////////////////////////
5310 class CMSRefProcTaskProxy: public AbstractGangTaskWOopQueues {
5311   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
5312   CMSCollector*          _collector;
5313   CMSBitMap*             _mark_bit_map;
5314   const MemRegion        _span;
5315   ProcessTask&           _task;
5316 
5317 public:
5318   CMSRefProcTaskProxy(ProcessTask&     task,
5319                       CMSCollector*    collector,
5320                       const MemRegion& span,
5321                       CMSBitMap*       mark_bit_map,
5322                       AbstractWorkGang* workers,
5323                       OopTaskQueueSet* task_queues):
5324     // XXX Should superclass AGTWOQ also know about AWG since it knows
5325     // about the task_queues used by the AWG? Then it could initialize
5326     // the terminator() object. See 6984287. The set_for_termination()
5327     // below is a temporary band-aid for the regression in 6984287.
5328     AbstractGangTaskWOopQueues("Process referents by policy in parallel",
5329       task_queues),
5330     _task(task),
5331     _collector(collector), _span(span), _mark_bit_map(mark_bit_map)
5332   {
5333     assert(_collector->_span.equals(_span) && !_span.is_empty(),
5334            "Inconsistency in _span");
5335     set_for_termination(workers->active_workers());
5336   }
5337 
5338   OopTaskQueueSet* task_queues() { return queues(); }
5339 
5340   OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
5341 
5342   void do_work_steal(int i,
5343                      CMSParDrainMarkingStackClosure* drain,
5344                      CMSParKeepAliveClosure* keep_alive,
5345                      int* seed);
5346 
5347   virtual void work(uint worker_id);
5348 };
5349 
5350 void CMSRefProcTaskProxy::work(uint worker_id) {
5351   ResourceMark rm;
5352   HandleMark hm;
5353   assert(_collector->_span.equals(_span), "Inconsistency in _span");
5354   CMSParKeepAliveClosure par_keep_alive(_collector, _span,
5355                                         _mark_bit_map,
5356                                         work_queue(worker_id));
5357   CMSParDrainMarkingStackClosure par_drain_stack(_collector, _span,
5358                                                  _mark_bit_map,
5359                                                  work_queue(worker_id));
5360   CMSIsAliveClosure is_alive_closure(_span, _mark_bit_map);
5361   _task.work(worker_id, is_alive_closure, par_keep_alive, par_drain_stack);
5362   if (_task.marks_oops_alive()) {
5363     do_work_steal(worker_id, &par_drain_stack, &par_keep_alive,
5364                   _collector->hash_seed(worker_id));
5365   }
5366   assert(work_queue(worker_id)->size() == 0, "work_queue should be empty");
5367   assert(_collector->_overflow_list == NULL, "non-empty _overflow_list");
5368 }
5369 
5370 class CMSRefEnqueueTaskProxy: public AbstractGangTask {
5371   typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
5372   EnqueueTask& _task;
5373 
5374 public:
5375   CMSRefEnqueueTaskProxy(EnqueueTask& task)
5376     : AbstractGangTask("Enqueue reference objects in parallel"),
5377       _task(task)
5378   { }
5379 
5380   virtual void work(uint worker_id)
5381   {
5382     _task.work(worker_id);
5383   }
5384 };
5385 
5386 CMSParKeepAliveClosure::CMSParKeepAliveClosure(CMSCollector* collector,
5387   MemRegion span, CMSBitMap* bit_map, OopTaskQueue* work_queue):
5388    _span(span),
5389    _bit_map(bit_map),
5390    _work_queue(work_queue),
5391    _mark_and_push(collector, span, bit_map, work_queue),
5392    _low_water_mark(MIN2((uint)(work_queue->max_elems()/4),
5393                         (uint)(CMSWorkQueueDrainThreshold * ParallelGCThreads)))
5394 { }
5395 
5396 // . see if we can share work_queues with ParNew? XXX
5397 void CMSRefProcTaskProxy::do_work_steal(int i,
5398   CMSParDrainMarkingStackClosure* drain,
5399   CMSParKeepAliveClosure* keep_alive,
5400   int* seed) {
5401   OopTaskQueue* work_q = work_queue(i);
5402   NOT_PRODUCT(int num_steals = 0;)
5403   oop obj_to_scan;
5404 
5405   while (true) {
5406     // Completely finish any left over work from (an) earlier round(s)
5407     drain->trim_queue(0);
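         // Take at most a quarter of the space remaining in the work queue
         // from the overflow list, capped at ParGCDesiredObjsFromOverflowList.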
5408     size_t num_from_overflow_list = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
5409                                          (size_t)ParGCDesiredObjsFromOverflowList);
5410     // Now check if there's any work in the overflow list
5411     // Passing ParallelGCThreads as the third parameter, no_of_gc_threads,
5412     // only affects the number of attempts made to get work from the
5413     // overflow list and does not affect the number of workers.  Just
5414     // pass ParallelGCThreads so this behavior is unchanged.
5415     if (_collector->par_take_from_overflow_list(num_from_overflow_list,
5416                                                 work_q,
5417                                                 ParallelGCThreads)) {
5418       // Found something in global overflow list;
5419       // not yet ready to go stealing work from others.
5420       // We'd like to assert(work_q->size() != 0, ...)
5421       // because we just took work from the overflow list,
5422       // but of course we can't, since all of that might have
5423       // been already stolen from us.
5424       continue;
5425     }
5426     // Verify that we have no work before we resort to stealing
5427     assert(work_q->size() == 0, "Have work, shouldn't steal");
5428     // Try to steal from other queues that have work
5429     if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
5430       NOT_PRODUCT(num_steals++;)
5431       assert(obj_to_scan->is_oop(), "Oops, not an oop!");
5432       assert(_mark_bit_map->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?");
5433       // Do scanning work
5434       obj_to_scan->oop_iterate(keep_alive);
5435       // Loop around, finish this work, and try to steal some more
5436     } else if (terminator()->offer_termination()) {
5437       break;  // nirvana from the infinite cycle
5438     }
5439   }
5440   NOT_PRODUCT(
5441     if (PrintCMSStatistics != 0) {
5442       gclog_or_tty->print("\n\t(%d: stole %d oops)", i, num_steals);
5443     }
5444   )
5445 }
5446 
5447 void CMSRefProcTaskExecutor::execute(ProcessTask& task)
5448 {
5449   GenCollectedHeap* gch = GenCollectedHeap::heap();
5450   FlexibleWorkGang* workers = gch->workers();
5451   assert(workers != NULL, "Need parallel worker threads.");
5452   CMSRefProcTaskProxy rp_task(task, &_collector,
5453                               _collector.ref_processor()->span(),
5454                               _collector.markBitMap(),
5455                               workers, _collector.task_queues());
5456   workers->run_task(&rp_task);
5457 }
5458 
5459 void CMSRefProcTaskExecutor::execute(EnqueueTask& task)
5460 {
5461 
5462   GenCollectedHeap* gch = GenCollectedHeap::heap();
5463   FlexibleWorkGang* workers = gch->workers();
5464   assert(workers != NULL, "Need parallel worker threads.");
5465   CMSRefEnqueueTaskProxy enq_task(task);
5466   workers->run_task(&enq_task);
5467 }
5468 
5469 void CMSCollector::refProcessingWork() {
5470   ResourceMark rm;
5471   HandleMark   hm;
5472 
5473   ReferenceProcessor* rp = ref_processor();
5474   assert(rp->span().equals(_span), "Spans should be equal");
5475   assert(!rp->enqueuing_is_done(), "Enqueuing should not be complete");
5476   // Process weak references.
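       // setup_policy(false): use the default soft-reference clearing policy
       // rather than unconditionally clearing soft references.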
5477   rp->setup_policy(false);
5478   verify_work_stacks_empty();
5479 
5480   CMSKeepAliveClosure cmsKeepAliveClosure(this, _span, &_markBitMap,
5481                                           &_markStack, false /* !preclean */);
5482   CMSDrainMarkingStackClosure cmsDrainMarkingStackClosure(this,
5483                                 _span, &_markBitMap, &_markStack,
5484                                 &cmsKeepAliveClosure, false /* !preclean */);
5485   {
5486     GCTraceTime t("weak refs processing", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
5487 
5488     ReferenceProcessorStats stats;
5489     if (rp->processing_is_mt()) {
5490       // Set the degree of MT here.  If the discovery is done MT, there
5491       // may have been a different number of threads doing the discovery
5492       // and a different number of discovered lists may have Ref objects.
5493       // That is OK as long as the Reference lists are balanced (see
5494       // balance_all_queues() and balance_queues()).
5495       GenCollectedHeap* gch = GenCollectedHeap::heap();
5496       int active_workers = ParallelGCThreads;
5497       FlexibleWorkGang* workers = gch->workers();
5498       if (workers != NULL) {
5499         active_workers = workers->active_workers();
5500         // The expectation is that active_workers will have already
5501         // been set to a reasonable value.  If it has not been set,
5502         // investigate.
5503         assert(active_workers > 0, "Should have been set during scavenge");
5504       }
5505       rp->set_active_mt_degree(active_workers);
5506       CMSRefProcTaskExecutor task_executor(*this);
5507       stats = rp->process_discovered_references(&_is_alive_closure,
5508                                         &cmsKeepAliveClosure,
5509                                         &cmsDrainMarkingStackClosure,
5510                                         &task_executor,
5511                                         _gc_timer_cm,
5512                                         _gc_tracer_cm->gc_id());
5513     } else {
5514       stats = rp->process_discovered_references(&_is_alive_closure,
5515                                         &cmsKeepAliveClosure,
5516                                         &cmsDrainMarkingStackClosure,
5517                                         NULL,
5518                                         _gc_timer_cm,
5519                                         _gc_tracer_cm->gc_id());
5520     }
5521     _gc_tracer_cm->report_gc_reference_stats(stats);
5522 
5523   }
5524 
5525   // This is the point where the entire marking should have completed.
5526   verify_work_stacks_empty();
5527 
5528   if (should_unload_classes()) {
5529     {
5530       GCTraceTime t("class unloading", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
5531 
5532       // Unload classes and purge the SystemDictionary.
5533       bool purged_class = SystemDictionary::do_unloading(&_is_alive_closure);
5534 
5535       // Unload nmethods.
5536       CodeCache::do_unloading(&_is_alive_closure, purged_class);
5537 
5538       // Prune dead klasses from subklass/sibling/implementor lists.
5539       Klass::clean_weak_klass_links(&_is_alive_closure);
5540     }
5541 
5542     {
5543       GCTraceTime t("scrub symbol table", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
5544       // Clean up unreferenced symbols in symbol table.
5545       SymbolTable::unlink();
5546     }
5547 
5548     {
5549       GCTraceTime t("scrub string table", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
5550       // Delete entries for dead interned strings.
5551       StringTable::unlink(&_is_alive_closure);
5552     }
5553   }
5554 
5555 
5556   // Restore any preserved marks as a result of mark stack or
5557   // work queue overflow
5558   restore_preserved_marks_if_any();  // done single-threaded for now
5559 
5560   rp->set_enqueuing_is_done(true);
5561   if (rp->processing_is_mt()) {
5562     rp->balance_all_queues();
5563     CMSRefProcTaskExecutor task_executor(*this);
5564     rp->enqueue_discovered_references(&task_executor);
5565   } else {
5566     rp->enqueue_discovered_references(NULL);
5567   }
5568   rp->verify_no_references_recorded();
5569   assert(!rp->discovery_enabled(), "should have been disabled");
5570 }
5571 
5572 #ifndef PRODUCT
5573 void CMSCollector::check_correct_thread_executing() {
5574   Thread* t = Thread::current();
5575   // Only the VM thread or the CMS thread should be here.
5576   assert(t->is_ConcurrentGC_thread() || t->is_VM_thread(),
5577          "Unexpected thread type");
5578   // If this is the vm thread, the foreground process
5579   // should not be waiting.  Note that _foregroundGCIsActive is
5580   // true while the foreground collector is waiting.
5581   if (_foregroundGCShouldWait) {
5582     // We cannot be the VM thread
5583     assert(t->is_ConcurrentGC_thread(),
5584            "Should be CMS thread");
5585   } else {
5586     // We can be the CMS thread only if we are in a stop-world
5587     // phase of CMS collection.
5588     if (t->is_ConcurrentGC_thread()) {
5589       assert(_collectorState == InitialMarking ||
5590              _collectorState == FinalMarking,
5591              "Should be a stop-world phase");
5592       // The CMS thread should be holding the CMS_token.
5593       assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
5594              "Potential interference with concurrently "
5595              "executing VM thread");
5596     }
5597   }
5598 }
5599 #endif
5600 
5601 void CMSCollector::sweep() {
5602   assert(_collectorState == Sweeping, "just checking");
5603   check_correct_thread_executing();
5604   verify_work_stacks_empty();
5605   verify_overflow_empty();
5606   increment_sweep_count();
5607   TraceCMSMemoryManagerStats tms(_collectorState,GenCollectedHeap::heap()->gc_cause());
5608 
5609   _inter_sweep_timer.stop();
5610   _inter_sweep_estimate.sample(_inter_sweep_timer.seconds());
5611 
5612   assert(!_intra_sweep_timer.is_active(), "Should not be active");
5613   _intra_sweep_timer.reset();
5614   _intra_sweep_timer.start();
5615   {
5616     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
5617     CMSPhaseAccounting pa(this, "sweep", _gc_tracer_cm->gc_id(), !PrintGCDetails);
5618     // First sweep the old gen
5619     {
5620       CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(),
5621                                bitMapLock());
5622       sweepWork(_cmsGen);
5623     }
5624 
5625     // Update Universe::_heap_*_at_gc figures.
5626     // We need all the free list locks to make the abstract state
5627     // transition from Sweeping to Resetting. See detailed note
5628     // further below.
5629     {
5630       CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock());
5631       // Update heap occupancy information which is used as
5632       // input to soft ref clearing policy at the next gc.
5633       Universe::update_heap_info_at_gc();
5634       _collectorState = Resizing;
5635     }
5636   }
5637   verify_work_stacks_empty();
5638   verify_overflow_empty();
5639 
5640   if (should_unload_classes()) {
5641     // Delay purge to the beginning of the next safepoint.  Metaspace::contains
5642     // requires that the virtual spaces are stable and not deleted.
5643     ClassLoaderDataGraph::set_should_purge(true);
5644   }
5645 
5646   _intra_sweep_timer.stop();
5647   _intra_sweep_estimate.sample(_intra_sweep_timer.seconds());
5648 
5649   _inter_sweep_timer.reset();
5650   _inter_sweep_timer.start();
5651 
5652   // We need a monotonically non-decreasing time in ms to avoid
5653   // time-warp warnings; os::javaTimeMillis() does not guarantee
5654   // monotonicity, so derive the time from os::javaTimeNanos().
5655   jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
5656   update_time_of_last_gc(now);
5657 
5658   // NOTE on abstract state transitions:
5659   // Mutators allocate-live and/or mark the mod-union table dirty
5660   // based on the state of the collection.  The former is done in
5661   // the interval [Marking, Sweeping] and the latter in the interval
5662   // [Marking, Sweeping).  Thus the transitions into the Marking state
5663   // and out of the Sweeping state must be synchronously visible
5664   // globally to the mutators.
5665   // The transition into the Marking state happens with the world
5666   // stopped so the mutators will globally see it.  Sweeping is
5667   // done asynchronously by the background collector so the transition
5668   // from the Sweeping state to the Resizing state must be done
5669   // under the freelistLock (as is the check for whether to
5670   // allocate-live and whether to dirty the mod-union table).
5671   assert(_collectorState == Resizing, "Change of collector state to"
5672     " Resizing must be done under the freelistLocks (plural)");
5673 
5674   // Now that sweeping has been completed, we clear
5675   // the incremental_collection_failed flag,
5676   // thus inviting a younger gen collection to promote into
5677   // this generation. If such a promotion may still fail,
5678   // the flag will be set again when a young collection is
5679   // attempted.
5680   GenCollectedHeap* gch = GenCollectedHeap::heap();
5681   gch->clear_incremental_collection_failed();  // Worth retrying as fresh space may have been freed up
5682   gch->update_full_collections_completed(_collection_count_start);
5683 }
5684 
5685 // FIX ME!!! Looks like this belongs in CFLSpace, with
5686 // CMSGen merely delegating to it.
5687 void ConcurrentMarkSweepGeneration::setNearLargestChunk() {
5688   double nearLargestPercent = FLSLargestBlockCoalesceProximity;
5689   HeapWord*  minAddr        = _cmsSpace->bottom();
5690   HeapWord*  largestAddr    =
5691     (HeapWord*) _cmsSpace->dictionary()->find_largest_dict();
5692   if (largestAddr == NULL) {
5693     // The dictionary appears to be empty.  In this case
5694     // try to coalesce at the end of the heap.
5695     largestAddr = _cmsSpace->end();
5696   }
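       // Place the near-largest-chunk boundary nearLargestPercent of the way
       // from the bottom of the space to the largest free block, backed off
       // by MinChunkSize.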
5697   size_t largestOffset     = pointer_delta(largestAddr, minAddr);
5698   size_t nearLargestOffset =
5699     (size_t)((double)largestOffset * nearLargestPercent) - MinChunkSize;
5700   if (PrintFLSStatistics != 0) {
5701     gclog_or_tty->print_cr(
5702       "CMS: Large Block: " PTR_FORMAT ";"
5703       " Proximity: " PTR_FORMAT " -> " PTR_FORMAT,
5704       largestAddr,
5705       _cmsSpace->nearLargestChunk(), minAddr + nearLargestOffset);
5706   }
5707   _cmsSpace->set_nearLargestChunk(minAddr + nearLargestOffset);
5708 }
5709 
5710 bool ConcurrentMarkSweepGeneration::isNearLargestChunk(HeapWord* addr) {
5711   return addr >= _cmsSpace->nearLargestChunk();
5712 }
5713 
5714 FreeChunk* ConcurrentMarkSweepGeneration::find_chunk_at_end() {
5715   return _cmsSpace->find_chunk_at_end();
5716 }
5717 
5718 void ConcurrentMarkSweepGeneration::update_gc_stats(int current_level,
5719                                                     bool full) {
5720   // The next lower level has been collected.  Gather any statistics
5721   // that are of interest at this point.
5722   if (!full && (current_level + 1) == level()) {
5723     // Gather statistics on the young generation collection.
5724     collector()->stats().record_gc0_end(used());
5725   }
5726 }
5727 
5728 void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* gen) {
5729   // We iterate over the space(s) underlying this generation,
5730   // checking the mark bit map to see if the bits corresponding
5731   // to specific blocks are marked or not. Blocks that are
5732   // marked are live and are not swept up. All remaining blocks
5733   // are swept up, with coalescing on-the-fly as we sweep up
5734   // contiguous free and/or garbage blocks:
5735   // We need to ensure that the sweeper synchronizes with allocators
5736   // and stop-the-world collectors. In particular, the following
5737   // locks are used:
5738   // . CMS token: if this is held, a stop the world collection cannot occur
5739   // . freelistLock: if this is held no allocation can occur from this
5740   //                 generation by another thread
5741   // . bitMapLock: if this is held, no other thread can access or update
5742   //               the marking bit map
5743 
5744   // Note that we need to hold the freelistLock if we use
5745   // block iterate below; else the iterator might go awry if
5746   // a mutator (or promotion) causes block contents to change
5747   // (for instance if the allocator divvies up a block).
5748   // If we hold the free list lock, for all practical purposes
5749   // young generation GC's can't occur (they'll usually need to
5750   // promote), so we might as well prevent all young generation
5751   // GC's while we do a sweeping step. For the same reason, we might
5752   // as well take the bit map lock for the entire duration of the sweep.
5753 
5754   // check that we hold the requisite locks
5755   assert(have_cms_token(), "Should hold cms token");
5756   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), "Should possess CMS token to sweep");
5757   assert_lock_strong(gen->freelistLock());
5758   assert_lock_strong(bitMapLock());
5759 
5760   assert(!_inter_sweep_timer.is_active(), "Was switched off in an outer context");
5761   assert(_intra_sweep_timer.is_active(),  "Was switched on  in an outer context");
5762   gen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()),
5763                                       _inter_sweep_estimate.padded_average(),
5764                                       _intra_sweep_estimate.padded_average());
5765   gen->setNearLargestChunk();
5766 
5767   {
5768     SweepClosure sweepClosure(this, gen, &_markBitMap, CMSYield);
5769     gen->cmsSpace()->blk_iterate_careful(&sweepClosure);
5770     // We need to free-up/coalesce garbage/blocks from a
5771     // co-terminal free run. This is done in the SweepClosure
5772     // destructor; so, do not remove this scope, else the
5773     // end-of-sweep-census below will be off by a little bit.
5774   }
5775   gen->cmsSpace()->sweep_completed();
5776   gen->cmsSpace()->endSweepFLCensus(sweep_count());
5777   if (should_unload_classes()) {                // unloaded classes this cycle,
5778     _concurrent_cycles_since_last_unload = 0;   // ... reset count
5779   } else {                                      // did not unload classes,
5780     _concurrent_cycles_since_last_unload++;     // ... increment count
5781   }
5782 }
5783 
5784 // Reset CMS data structures (for now just the marking bit map)
5785 // preparatory for the next cycle.
5786 void CMSCollector::reset(bool concurrent) {
5787   if (concurrent) {
5788     CMSTokenSyncWithLocks ts(true, bitMapLock());
5789 
5790     // If the state is not "Resetting", the foreground thread
5791     // has already done the collection and the resetting.
5792     if (_collectorState != Resetting) {
5793       assert(_collectorState == Idling, "The state should only change"
5794         " because the foreground collector has finished the collection");
5795       return;
5796     }
5797 
5798     // Clear the mark bitmap (no grey objects to start with)
5799     // for the next cycle.
5800     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
5801     CMSPhaseAccounting cmspa(this, "reset", _gc_tracer_cm->gc_id(), !PrintGCDetails);
5802 
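         // Clear the bit map in chunks of CMSBitMapYieldQuantum words,
         // yielding the bitMapLock and the CMS token between chunks so that
         // a pending foreground (stop-the-world) collection is not held up.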
5803     HeapWord* curAddr = _markBitMap.startWord();
5804     while (curAddr < _markBitMap.endWord()) {
5805       size_t remaining  = pointer_delta(_markBitMap.endWord(), curAddr);
5806       MemRegion chunk(curAddr, MIN2(CMSBitMapYieldQuantum, remaining));
5807       _markBitMap.clear_large_range(chunk);
5808       if (ConcurrentMarkSweepThread::should_yield() &&
5809           !foregroundGCIsActive() &&
5810           CMSYield) {
5811         assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
5812                "CMS thread should hold CMS token");
5813         assert_lock_strong(bitMapLock());
5814         bitMapLock()->unlock();
5815         ConcurrentMarkSweepThread::desynchronize(true);
5816         stopTimer();
5817         if (PrintCMSStatistics != 0) {
5818           incrementYields();
5819         }
5820 
5821         // See the comment in coordinator_yield()
5822         for (unsigned i = 0; i < CMSYieldSleepCount &&
5823                          ConcurrentMarkSweepThread::should_yield() &&
5824                          !CMSCollector::foregroundGCIsActive(); ++i) {
5825           os::sleep(Thread::current(), 1, false);
5826         }
5827 
5828         ConcurrentMarkSweepThread::synchronize(true);
5829         bitMapLock()->lock_without_safepoint_check();
5830         startTimer();
5831       }
5832       curAddr = chunk.end();
5833     }
5834     // A successful mostly concurrent collection has been done.
5835     // Because only the full (i.e., concurrent mode failure) collections
5836     // are being measured for gc overhead limits, clean the "near" flag
5837     // and count.
5838     size_policy()->reset_gc_overhead_limit_count();
5839     _collectorState = Idling;
5840   } else {
5841     // already have the lock
5842     assert(_collectorState == Resetting, "just checking");
5843     assert_lock_strong(bitMapLock());
5844     _markBitMap.clear_all();
5845     _collectorState = Idling;
5846   }
5847 
5848   register_gc_end();
5849 }
5850 
5851 void CMSCollector::do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause) {
5852   TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
5853   GCTraceTime t(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL, _gc_tracer_cm->gc_id());
5854   TraceCollectorStats tcs(counters());
5855 
5856   switch (op) {
5857     case CMS_op_checkpointRootsInitial: {
5858       SvcGCMarker sgcm(SvcGCMarker::OTHER);
5859       checkpointRootsInitial();
5860       if (PrintGC) {
5861         _cmsGen->printOccupancy("initial-mark");
5862       }
5863       break;
5864     }
5865     case CMS_op_checkpointRootsFinal: {
5866       SvcGCMarker sgcm(SvcGCMarker::OTHER);
5867       checkpointRootsFinal();
5868       if (PrintGC) {
5869         _cmsGen->printOccupancy("remark");
5870       }
5871       break;
5872     }
5873     default:
5874       fatal("No such CMS_op");
5875   }
5876 }
5877 
5878 #ifndef PRODUCT
5879 size_t const CMSCollector::skip_header_HeapWords() {
5880   return FreeChunk::header_size();
5881 }
5882 
5883 // Try and collect here conditions that should hold when
5884 // CMS thread is exiting. The idea is that the foreground GC
5885 // thread should not be blocked if it wants to terminate
5886 // the CMS thread and yet continue to run the VM for a while
5887 // after that.
5888 void CMSCollector::verify_ok_to_terminate() const {
5889   assert(Thread::current()->is_ConcurrentGC_thread(),
5890          "should be called by CMS thread");
5891   assert(!_foregroundGCShouldWait, "should be false");
5892   // We could check here that all the various low-level locks
5893   // are not held by the CMS thread, but that is overkill; see
5894   // also CMSThread::verify_ok_to_terminate() where the CGC_lock
5895   // is checked.
5896 }
5897 #endif
5898 
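     // "Printezis marks": for a block whose klass pointer is not yet readable,
     // the mark bits at both addr and addr + 1 are set, and a third bit is set
     // at the block's last word; the block size can therefore be recovered as
     // the distance from addr to one past the next marked word at or after
     // addr + 2.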
5899 size_t CMSCollector::block_size_using_printezis_bits(HeapWord* addr) const {
5900   assert(_markBitMap.isMarked(addr) && _markBitMap.isMarked(addr + 1),
5901          "missing Printezis mark?");
5902   HeapWord* nextOneAddr = _markBitMap.getNextMarkedWordAddress(addr + 2);
5903   size_t size = pointer_delta(nextOneAddr + 1, addr);
5904   assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
5905          "alignment problem");
5906   assert(size >= 3, "Necessary for Printezis marks to work");
5907   return size;
5908 }
5909 
5910 // A variant of the above (block_size_using_printezis_bits()) except
5911 // that we return 0 if the P-bits are not yet set.
5912 size_t CMSCollector::block_size_if_printezis_bits(HeapWord* addr) const {
5913   if (_markBitMap.isMarked(addr + 1)) {
5914     assert(_markBitMap.isMarked(addr), "P-bit can be set only for marked objects");
5915     HeapWord* nextOneAddr = _markBitMap.getNextMarkedWordAddress(addr + 2);
5916     size_t size = pointer_delta(nextOneAddr + 1, addr);
5917     assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
5918            "alignment problem");
5919     assert(size >= 3, "Necessary for Printezis marks to work");
5920     return size;
5921   }
5922   return 0;
5923 }
5924 
5925 HeapWord* CMSCollector::next_card_start_after_block(HeapWord* addr) const {
5926   size_t sz = 0;
5927   oop p = (oop)addr;
5928   if (p->klass_or_null() != NULL) {
5929     sz = CompactibleFreeListSpace::adjustObjectSize(p->size());
5930   } else {
5931     sz = block_size_using_printezis_bits(addr);
5932   }
5933   assert(sz > 0, "size must be nonzero");
5934   HeapWord* next_block = addr + sz;
5935   HeapWord* next_card  = (HeapWord*)round_to((uintptr_t)next_block,
5936                                              CardTableModRefBS::card_size);
5937   assert(round_down((uintptr_t)addr,      CardTableModRefBS::card_size) <
5938          round_down((uintptr_t)next_card, CardTableModRefBS::card_size),
5939          "must be different cards");
5940   return next_card;
5941 }
5942 
5943 
5944 // CMS Bit Map Wrapper /////////////////////////////////////////
5945 
5946 // Construct a CMS bit map infrastructure, but don't create the
5947 // bit vector itself. That is done by a separate call to CMSBitMap::allocate()
5948 // further below.
5949 CMSBitMap::CMSBitMap(int shifter, int mutex_rank, const char* mutex_name):
5950   _bm(),
5951   _shifter(shifter),
5952   _lock(mutex_rank >= 0 ? new Mutex(mutex_rank, mutex_name, true,
5953                                     Monitor::_safepoint_check_sometimes) : NULL)
5954 {
5955   _bmStartWord = 0;
5956   _bmWordSize  = 0;
5957 }
5958 
5959 bool CMSBitMap::allocate(MemRegion mr) {
5960   _bmStartWord = mr.start();
5961   _bmWordSize  = mr.word_size();
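       // One map bit covers (1 << _shifter) heap words, so we need
       // (_bmWordSize >> _shifter) bits, i.e.
       // (_bmWordSize >> (_shifter + LogBitsPerByte)) bytes; the "+ 1"
       // below allows for truncation in the shifts.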
5962   ReservedSpace brs(ReservedSpace::allocation_align_size_up(
5963                      (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1));
5964   if (!brs.is_reserved()) {
5965     warning("CMS bit map allocation failure");
5966     return false;
5967   }
5968   // For now we'll just commit all of the bit map up front.
5969   // Later on we'll try to be more parsimonious with swap.
5970   if (!_virtual_space.initialize(brs, brs.size())) {
5971     warning("CMS bit map backing store failure");
5972     return false;
5973   }
5974   assert(_virtual_space.committed_size() == brs.size(),
5975          "didn't reserve backing store for all of CMS bit map?");
5976   _bm.set_map((BitMap::bm_word_t*)_virtual_space.low());
5977   assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >=
5978          _bmWordSize, "inconsistency in bit map sizing");
5979   _bm.set_size(_bmWordSize >> _shifter);
5980 
5981   // bm.clear(); // can we rely on getting zero'd memory? verify below
5982   assert(isAllClear(),
5983          "Expected zero'd memory from ReservedSpace constructor");
5984   assert(_bm.size() == heapWordDiffToOffsetDiff(sizeInWords()),
5985          "consistency check");
5986   return true;
5987 }
5988 
5989 void CMSBitMap::dirty_range_iterate_clear(MemRegion mr, MemRegionClosure* cl) {
5990   HeapWord *next_addr, *end_addr, *last_addr;
5991   assert_locked();
5992   assert(covers(mr), "out-of-range error");
5993   // XXX assert that start and end are appropriately aligned
5994   for (next_addr = mr.start(), end_addr = mr.end();
5995        next_addr < end_addr; next_addr = last_addr) {
5996     MemRegion dirty_region = getAndClearMarkedRegion(next_addr, end_addr);
5997     last_addr = dirty_region.end();
5998     if (!dirty_region.is_empty()) {
5999       cl->do_MemRegion(dirty_region);
6000     } else {
6001       assert(last_addr == end_addr, "program logic");
6002       return;
6003     }
6004   }
6005 }
6006 
6007 void CMSBitMap::print_on_error(outputStream* st, const char* prefix) const {
6008   _bm.print_on_error(st, prefix);
6009 }
6010 
6011 #ifndef PRODUCT
6012 void CMSBitMap::assert_locked() const {
6013   CMSLockVerifier::assert_locked(lock());
6014 }
6015 
6016 bool CMSBitMap::covers(MemRegion mr) const {
6017   // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
6018   assert((size_t)_bm.size() == (_bmWordSize >> _shifter),
6019          "size inconsistency");
6020   return (mr.start() >= _bmStartWord) &&
6021          (mr.end()   <= endWord());
6022 }
6023 
6024 bool CMSBitMap::covers(HeapWord* start, size_t size) const {
6025   return (start >= _bmStartWord && (start + size) <= endWord());
6026 }
6027 
6028 void CMSBitMap::verifyNoOneBitsInRange(HeapWord* left, HeapWord* right) {
6029   // verify that there are no 1 bits in the interval [left, right)
6030   FalseBitMapClosure falseBitMapClosure;
6031   iterate(&falseBitMapClosure, left, right);
6032 }
6033 
6034 void CMSBitMap::region_invariant(MemRegion mr)
6035 {
6036   assert_locked();
6037   // mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
6038   assert(!mr.is_empty(), "unexpected empty region");
6039   assert(covers(mr), "mr should be covered by bit map");
6040   // convert address range into offset range
6041   size_t start_ofs = heapWordToOffset(mr.start());
6042   // Make sure that end() is appropriately aligned
6043   assert(mr.end() == (HeapWord*)round_to((intptr_t)mr.end(),
6044                         (1 << (_shifter+LogHeapWordSize))),
6045          "Misaligned mr.end()");
6046   size_t end_ofs   = heapWordToOffset(mr.end());
6047   assert(end_ofs > start_ofs, "Should mark at least one bit");
6048 }
6049 
6050 #endif
6051 
6052 bool CMSMarkStack::allocate(size_t size) {
6053   // allocate a stack of the requisite depth
6054   ReservedSpace rs(ReservedSpace::allocation_align_size_up(
6055                    size * sizeof(oop)));
6056   if (!rs.is_reserved()) {
6057     warning("CMSMarkStack allocation failure");
6058     return false;
6059   }
6060   if (!_virtual_space.initialize(rs, rs.size())) {
6061     warning("CMSMarkStack backing store failure");
6062     return false;
6063   }
6064   assert(_virtual_space.committed_size() == rs.size(),
6065          "didn't reserve backing store for all of CMS stack?");
6066   _base = (oop*)(_virtual_space.low());
6067   _index = 0;
6068   _capacity = size;
6069   NOT_PRODUCT(_max_depth = 0);
6070   return true;
6071 }
6072 
6073 // XXX FIX ME !!! In the MT case we come in here holding a
6074 // leaf lock. For printing we need to take a further lock
6075 // which has lower rank. We need to recalibrate the two
6076 // lock-ranks involved in order to be able to print the
6077 // messages below. (Or defer the printing to the caller.
6078 // For now we take the expedient path of just disabling the
6079 // messages for the problematic case.)
6080 void CMSMarkStack::expand() {
6081   assert(_capacity <= MarkStackSizeMax, "stack bigger than permitted");
6082   if (_capacity == MarkStackSizeMax) {
6083     if (_hit_limit++ == 0 && !CMSConcurrentMTEnabled && PrintGCDetails) {
6084       // We print a warning message only once per CMS cycle.
6085       gclog_or_tty->print_cr(" (benign) Hit CMSMarkStack max size limit");
6086     }
6087     return;
6088   }
6089   // Double capacity if possible
6090   size_t new_capacity = MIN2(_capacity*2, MarkStackSizeMax);
6091   // Do not give up existing stack until we have managed to
6092   // get the double capacity that we desired.
6093   ReservedSpace rs(ReservedSpace::allocation_align_size_up(
6094                    new_capacity * sizeof(oop)));
6095   if (rs.is_reserved()) {
6096     // Release the backing store associated with old stack
6097     _virtual_space.release();
6098     // Reinitialize virtual space for new stack
6099     if (!_virtual_space.initialize(rs, rs.size())) {
6100       fatal("Not enough swap for expanded marking stack");
6101     }
6102     _base = (oop*)(_virtual_space.low());
6103     _index = 0;
6104     _capacity = new_capacity;
6105   } else if (_failed_double++ == 0 && !CMSConcurrentMTEnabled && PrintGCDetails) {
6106     // Failed to double capacity, continue;
6107     // we print a detail message only once per CMS cycle.
6108     gclog_or_tty->print(" (benign) Failed to expand marking stack from " SIZE_FORMAT "K to "
6109             SIZE_FORMAT "K",
6110             _capacity / K, new_capacity / K);
6111   }
6112 }
6113 
6114 
6115 // Closures
6116 // XXX: there seems to be a lot of code duplication here;
6117 // should refactor and consolidate common code.
6118 
6119 // This closure is used to mark refs into the CMS generation in
6120 // the CMS bit map. Called at the first checkpoint. This closure
6121 // assumes that we do not need to re-mark dirty cards; if the CMS
6122 // generation on which this is used is not the oldest
6123 // generation then this will lose younger_gen cards!
6124 
6125 MarkRefsIntoClosure::MarkRefsIntoClosure(
6126   MemRegion span, CMSBitMap* bitMap):
6127     _span(span),
6128     _bitMap(bitMap)
6129 {
6130     assert(_ref_processor == NULL, "deliberately left NULL");
6131     assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
6132 }
6133 
6134 void MarkRefsIntoClosure::do_oop(oop obj) {
6135   // if p points into _span, then mark corresponding bit in _markBitMap
6136   assert(obj->is_oop(), "expected an oop");
6137   HeapWord* addr = (HeapWord*)obj;
6138   if (_span.contains(addr)) {
6139     // this should be made more efficient
6140     _bitMap->mark(addr);
6141   }
6142 }
6143 
6144 void MarkRefsIntoClosure::do_oop(oop* p)       { MarkRefsIntoClosure::do_oop_work(p); }
6145 void MarkRefsIntoClosure::do_oop(narrowOop* p) { MarkRefsIntoClosure::do_oop_work(p); }
6146 
6147 Par_MarkRefsIntoClosure::Par_MarkRefsIntoClosure(
6148   MemRegion span, CMSBitMap* bitMap):
6149     _span(span),
6150     _bitMap(bitMap)
6151 {
6152     assert(_ref_processor == NULL, "deliberately left NULL");
6153     assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
6154 }
6155 
6156 void Par_MarkRefsIntoClosure::do_oop(oop obj) {
6157   // if p points into _span, then mark corresponding bit in _markBitMap
6158   assert(obj->is_oop(), "expected an oop");
6159   HeapWord* addr = (HeapWord*)obj;
6160   if (_span.contains(addr)) {
6161     // this should be made more efficient
6162     _bitMap->par_mark(addr);
6163   }
6164 }
6165 
6166 void Par_MarkRefsIntoClosure::do_oop(oop* p)       { Par_MarkRefsIntoClosure::do_oop_work(p); }
6167 void Par_MarkRefsIntoClosure::do_oop(narrowOop* p) { Par_MarkRefsIntoClosure::do_oop_work(p); }
6168 
6169 // A variant of the above, used for CMS marking verification.
6170 MarkRefsIntoVerifyClosure::MarkRefsIntoVerifyClosure(
6171   MemRegion span, CMSBitMap* verification_bm, CMSBitMap* cms_bm):
6172     _span(span),
6173     _verification_bm(verification_bm),
6174     _cms_bm(cms_bm)
6175 {
6176     assert(_ref_processor == NULL, "deliberately left NULL");
6177     assert(_verification_bm->covers(_span), "_verification_bm/_span mismatch");
6178 }
6179 
6180 void MarkRefsIntoVerifyClosure::do_oop(oop obj) {
6181   // if p points into _span, then mark corresponding bit in _markBitMap
6182   assert(obj->is_oop(), "expected an oop");
6183   HeapWord* addr = (HeapWord*)obj;
6184   if (_span.contains(addr)) {
6185     _verification_bm->mark(addr);
6186     if (!_cms_bm->isMarked(addr)) {
6187       oop(addr)->print();
6188       gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)", addr);
6189       fatal("... aborting");
6190     }
6191   }
6192 }
6193 
6194 void MarkRefsIntoVerifyClosure::do_oop(oop* p)       { MarkRefsIntoVerifyClosure::do_oop_work(p); }
6195 void MarkRefsIntoVerifyClosure::do_oop(narrowOop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); }
6196 
6197 //////////////////////////////////////////////////
6198 // MarkRefsIntoAndScanClosure
6199 //////////////////////////////////////////////////
6200 
6201 MarkRefsIntoAndScanClosure::MarkRefsIntoAndScanClosure(MemRegion span,
6202                                                        ReferenceProcessor* rp,
6203                                                        CMSBitMap* bit_map,
6204                                                        CMSBitMap* mod_union_table,
6205                                                        CMSMarkStack*  mark_stack,
6206                                                        CMSCollector* collector,
6207                                                        bool should_yield,
6208                                                        bool concurrent_precleaning):
6209   _collector(collector),
6210   _span(span),
6211   _bit_map(bit_map),
6212   _mark_stack(mark_stack),
6213   _pushAndMarkClosure(collector, span, rp, bit_map, mod_union_table,
6214                       mark_stack, concurrent_precleaning),
6215   _yield(should_yield),
6216   _concurrent_precleaning(concurrent_precleaning),
6217   _freelistLock(NULL)
6218 {
6219   _ref_processor = rp;
6220   assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
6221 }
6222 
6223 // This closure is used to mark refs into the CMS generation at the
6224 // second (final) checkpoint, and to scan and transitively follow
6225 // the unmarked oops. It is also used during the concurrent precleaning
6226 // phase while scanning objects on dirty cards in the CMS generation.
6227 // The marks are made in the marking bit map and the marking stack is
6228 // used for keeping the (newly) grey objects during the scan.
6229 // The parallel version (Par_...) appears further below.
6230 void MarkRefsIntoAndScanClosure::do_oop(oop obj) {
6231   if (obj != NULL) {
6232     assert(obj->is_oop(), "expected an oop");
6233     HeapWord* addr = (HeapWord*)obj;
6234     assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
6235     assert(_collector->overflow_list_is_empty(),
6236            "overflow list should be empty");
6237     if (_span.contains(addr) &&
6238         !_bit_map->isMarked(addr)) {
6239       // mark bit map (object is now grey)
6240       _bit_map->mark(addr);
6241       // push on marking stack (stack should be empty), and drain the
6242       // stack by applying this closure to the oops in the oops popped
6243       // from the stack (i.e. blacken the grey objects)
6244       bool res = _mark_stack->push(obj);
6245       assert(res, "Should have space to push on empty stack");
6246       do {
6247         oop new_oop = _mark_stack->pop();
6248         assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
6249         assert(_bit_map->isMarked((HeapWord*)new_oop),
6250                "only grey objects on this stack");
6251         // iterate over the oops in this oop, marking and pushing
6252         // the ones in CMS heap (i.e. in _span).
6253         new_oop->oop_iterate(&_pushAndMarkClosure);
6254         // check if it's time to yield
6255         do_yield_check();
6256       } while (!_mark_stack->isEmpty() ||
6257                (!_concurrent_precleaning && take_from_overflow_list()));
6258         // if marking stack is empty, and we are not doing this
6259         // during precleaning, then check the overflow list
6260     }
6261     assert(_mark_stack->isEmpty(), "post-condition (eager drainage)");
6262     assert(_collector->overflow_list_is_empty(),
6263            "overflow list was drained above");
6264     // We could restore evacuated mark words, if any, used for
6265     // overflow list links here because the overflow list is
6266     // provably empty here. That would reduce the maximum
6267     // size requirements for preserved_{oop,mark}_stack.
6268     // But we'll just postpone it until we are all done
6269     // so we can just stream through.
6270     if (!_concurrent_precleaning && CMSOverflowEarlyRestoration) {
6271       _collector->restore_preserved_marks_if_any();
6272       assert(_collector->no_preserved_marks(), "No preserved marks");
6273     }
6274     assert(!CMSOverflowEarlyRestoration || _collector->no_preserved_marks(),
6275            "All preserved marks should have been restored above");
6276   }
6277 }
6278 
6279 void MarkRefsIntoAndScanClosure::do_oop(oop* p)       { MarkRefsIntoAndScanClosure::do_oop_work(p); }
6280 void MarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
6281 
6282 void MarkRefsIntoAndScanClosure::do_yield_work() {
6283   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6284          "CMS thread should hold CMS token");
6285   assert_lock_strong(_freelistLock);
6286   assert_lock_strong(_bit_map->lock());
6287   // relinquish the free_list_lock and bitMaplock()
6288   _bit_map->lock()->unlock();
6289   _freelistLock->unlock();
6290   ConcurrentMarkSweepThread::desynchronize(true);
6291   _collector->stopTimer();
6292   if (PrintCMSStatistics != 0) {
6293     _collector->incrementYields();
6294   }
6295 
6296   // See the comment in coordinator_yield()
6297   for (unsigned i = 0;
6298        i < CMSYieldSleepCount &&
6299        ConcurrentMarkSweepThread::should_yield() &&
6300        !CMSCollector::foregroundGCIsActive();
6301        ++i) {
6302     os::sleep(Thread::current(), 1, false);
6303   }
6304 
6305   ConcurrentMarkSweepThread::synchronize(true);
6306   _freelistLock->lock_without_safepoint_check();
6307   _bit_map->lock()->lock_without_safepoint_check();
6308   _collector->startTimer();
6309 }
6310 
6311 ///////////////////////////////////////////////////////////
6312 // Par_MarkRefsIntoAndScanClosure: a parallel version of
6313 //                                 MarkRefsIntoAndScanClosure
6314 ///////////////////////////////////////////////////////////
6315 Par_MarkRefsIntoAndScanClosure::Par_MarkRefsIntoAndScanClosure(
6316   CMSCollector* collector, MemRegion span, ReferenceProcessor* rp,
6317   CMSBitMap* bit_map, OopTaskQueue* work_queue):
6318   _span(span),
6319   _bit_map(bit_map),
6320   _work_queue(work_queue),
6321   _low_water_mark(MIN2((uint)(work_queue->max_elems()/4),
6322                        (uint)(CMSWorkQueueDrainThreshold * ParallelGCThreads))),
6323   _par_pushAndMarkClosure(collector, span, rp, bit_map, work_queue)
6324 {
6325   _ref_processor = rp;
6326   assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
6327 }
6328 
6329 // This closure is used to mark refs into the CMS generation at the
6330 // second (final) checkpoint, and to scan and transitively follow
6331 // the unmarked oops. The marks are made in the marking bit map and
6332 // the work_queue is used for keeping the (newly) grey objects during
6333 // the scan phase whence they are also available for stealing by parallel
6334 // threads. Since the marking bit map is shared, updates are
6335 // synchronized (via CAS).
6336 void Par_MarkRefsIntoAndScanClosure::do_oop(oop obj) {
6337   if (obj != NULL) {
6338     // Ignore mark word because this could be an already marked oop
6339     // that may be chained at the end of the overflow list.
6340     assert(obj->is_oop(true), "expected an oop");
6341     HeapWord* addr = (HeapWord*)obj;
6342     if (_span.contains(addr) &&
6343         !_bit_map->isMarked(addr)) {
6344       // mark bit map (object will become grey):
6345       // It is possible for several threads to be
6346       // trying to "claim" this object concurrently;
6347       // the unique thread that succeeds in marking the
6348       // object first will do the subsequent push on
6349       // to the work queue (or overflow list).
6350       if (_bit_map->par_mark(addr)) {
6351         // push on work_queue (which may not be empty), and trim the
6352         // queue to an appropriate length by applying this closure to
6353         // the oops in the oops popped from the stack (i.e. blacken the
6354         // grey objects)
6355         bool res = _work_queue->push(obj);
6356         assert(res, "Low water mark should be less than capacity?");
6357         trim_queue(_low_water_mark);
6358       } // Else, another thread claimed the object
6359     }
6360   }
6361 }
6362 
6363 void Par_MarkRefsIntoAndScanClosure::do_oop(oop* p)       { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
6364 void Par_MarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
6365 
6366 // This closure is used to rescan the marked objects on the dirty cards
6367 // in the mod union table and the card table proper.
6368 size_t ScanMarkedObjectsAgainCarefullyClosure::do_object_careful_m(
6369   oop p, MemRegion mr) {
6370 
6371   size_t size = 0;
6372   HeapWord* addr = (HeapWord*)p;
6373   DEBUG_ONLY(_collector->verify_work_stacks_empty();)
6374   assert(_span.contains(addr), "we are scanning the CMS generation");
6375   // check if it's time to yield
6376   if (do_yield_check()) {
6377     // We yielded for some foreground stop-world work,
6378     // and we have been asked to abort this ongoing preclean cycle.
6379     return 0;
6380   }
6381   if (_bitMap->isMarked(addr)) {
6382     // it's marked; is it potentially uninitialized?
6383     if (p->klass_or_null() != NULL) {
6384         // an initialized object; ignore mark word in verification below
6385         // since we are running concurrent with mutators
6386         assert(p->is_oop(true), "should be an oop");
6387         if (p->is_objArray()) {
6388           // objArrays are precisely marked; restrict scanning
6389           // to dirty cards only.
6390           size = CompactibleFreeListSpace::adjustObjectSize(
6391                    p->oop_iterate(_scanningClosure, mr));
6392         } else {
6393           // A non-array may have been imprecisely marked; we need
6394           // to scan the object in its entirety.
6395           size = CompactibleFreeListSpace::adjustObjectSize(
6396                    p->oop_iterate(_scanningClosure));
6397         }
6398         #ifdef ASSERT
6399           size_t direct_size =
6400             CompactibleFreeListSpace::adjustObjectSize(p->size());
6401           assert(size == direct_size, "Inconsistency in size");
6402           assert(size >= 3, "Necessary for Printezis marks to work");
6403           if (!_bitMap->isMarked(addr+1)) {
6404             _bitMap->verifyNoOneBitsInRange(addr+2, addr+size);
6405           } else {
6406             _bitMap->verifyNoOneBitsInRange(addr+2, addr+size-1);
6407             assert(_bitMap->isMarked(addr+size-1),
6408                    "inconsistent Printezis mark");
6409           }
6410         #endif // ASSERT
6411     } else {
6412       // An uninitialized object.
6413       assert(_bitMap->isMarked(addr+1), "missing Printezis mark?");
6414       HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
6415       size = pointer_delta(nextOneAddr + 1, addr);
6416       assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
6417              "alignment problem");
6418       // Note that pre-cleaning needn't redirty the card. OopDesc::set_klass()
6419       // will dirty the card when the klass pointer is installed in the
6420       // object (signaling the completion of initialization).
6421     }
6422   } else {
6423     // Either a not yet marked object or an uninitialized object
6424     if (p->klass_or_null() == NULL) {
6425       // An uninitialized object, skip to the next card, since
6426       // we may not be able to read its P-bits yet.
6427       assert(size == 0, "Initial value");
6428     } else {
6429       // An object not (yet) reached by marking: we merely need to
6430       // compute its size so as to go look at the next block.
6431       assert(p->is_oop(true), "should be an oop");
6432       size = CompactibleFreeListSpace::adjustObjectSize(p->size());
6433     }
6434   }
6435   DEBUG_ONLY(_collector->verify_work_stacks_empty();)
6436   return size;
6437 }
6438 
6439 void ScanMarkedObjectsAgainCarefullyClosure::do_yield_work() {
6440   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6441          "CMS thread should hold CMS token");
6442   assert_lock_strong(_freelistLock);
6443   assert_lock_strong(_bitMap->lock());
6444   // relinquish the free_list_lock and the bitMapLock()
6445   _bitMap->lock()->unlock();
6446   _freelistLock->unlock();
6447   ConcurrentMarkSweepThread::desynchronize(true);
6448   _collector->stopTimer();
6449   if (PrintCMSStatistics != 0) {
6450     _collector->incrementYields();
6451   }
6452 
6453   // See the comment in coordinator_yield()
6454   for (unsigned i = 0; i < CMSYieldSleepCount &&
6455                    ConcurrentMarkSweepThread::should_yield() &&
6456                    !CMSCollector::foregroundGCIsActive(); ++i) {
6457     os::sleep(Thread::current(), 1, false);
6458   }
6459 
6460   ConcurrentMarkSweepThread::synchronize(true);
6461   _freelistLock->lock_without_safepoint_check();
6462   _bitMap->lock()->lock_without_safepoint_check();
6463   _collector->startTimer();
6464 }
6465 
6466 
6467 //////////////////////////////////////////////////////////////////
6468 // SurvivorSpacePrecleanClosure
6469 //////////////////////////////////////////////////////////////////
6470 // This (single-threaded) closure is used to preclean the oops in
6471 // the survivor spaces.
6472 size_t SurvivorSpacePrecleanClosure::do_object_careful(oop p) {
6473 
6474   HeapWord* addr = (HeapWord*)p;
6475   DEBUG_ONLY(_collector->verify_work_stacks_empty();)
6476   assert(!_span.contains(addr), "we are scanning the survivor spaces");
6477   assert(p->klass_or_null() != NULL, "object should be initialized");
6478   // an initialized object; ignore mark word in verification below
6479   // since we are running concurrent with mutators
6480   assert(p->is_oop(true), "should be an oop");
6481   // Note that we do not yield while we iterate over
6482   // the interior oops of p, pushing the relevant ones
6483   // on our marking stack.
6484   size_t size = p->oop_iterate(_scanning_closure);
6485   do_yield_check();
6486   // Observe that below, we do not abandon the preclean
6487   // phase as soon as we should; rather we empty the
6488   // marking stack before returning. This is to satisfy
6489   // some existing assertions. In general, it may be a
6490   // good idea to abort immediately and complete the marking
6491   // from the grey objects at a later time.
6492   while (!_mark_stack->isEmpty()) {
6493     oop new_oop = _mark_stack->pop();
6494     assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
6495     assert(_bit_map->isMarked((HeapWord*)new_oop),
6496            "only grey objects on this stack");
6497     // iterate over the oops in this oop, marking and pushing
6498     // the ones in CMS heap (i.e. in _span).
6499     new_oop->oop_iterate(_scanning_closure);
6500     // check if it's time to yield
6501     do_yield_check();
6502   }
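       // Editorial note on the abort check below (a sketch inferred from the
       // code, not an authoritative statement): if the heap's total collection
       // count changed while we iterated, a collection intervened at one of
       // the yield points above and the survivor-space contents we just
       // scanned may have moved, so we return 0 to abandon this preclean pass.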
6503   unsigned int after_count =
6504     GenCollectedHeap::heap()->total_collections();
6505   bool abort = (_before_count != after_count) ||
6506                _collector->should_abort_preclean();
6507   return abort ? 0 : size;
6508 }
6509 
6510 void SurvivorSpacePrecleanClosure::do_yield_work() {
6511   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6512          "CMS thread should hold CMS token");
6513   assert_lock_strong(_bit_map->lock());
6514   // Relinquish the bit map lock
6515   _bit_map->lock()->unlock();
6516   ConcurrentMarkSweepThread::desynchronize(true);
6517   _collector->stopTimer();
6518   if (PrintCMSStatistics != 0) {
6519     _collector->incrementYields();
6520   }
6521 
6522   // See the comment in coordinator_yield()
6523   for (unsigned i = 0; i < CMSYieldSleepCount &&
6524                        ConcurrentMarkSweepThread::should_yield() &&
6525                        !CMSCollector::foregroundGCIsActive(); ++i) {
6526     os::sleep(Thread::current(), 1, false);
6527   }
6528 
6529   ConcurrentMarkSweepThread::synchronize(true);
6530   _bit_map->lock()->lock_without_safepoint_check();
6531   _collector->startTimer();
6532 }
6533 
6534 // This closure is used to rescan the marked objects on the dirty cards
6535 // in the mod union table and the card table proper. In the parallel
6536 // case, although the bitMap is shared, we do a single read so the
6537 // isMarked() query is "safe".
6538 bool ScanMarkedObjectsAgainClosure::do_object_bm(oop p, MemRegion mr) {
6539   // Ignore mark word because we are running concurrent with mutators
6540   assert(p->is_oop_or_null(true), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(p)));
6541   HeapWord* addr = (HeapWord*)p;
6542   assert(_span.contains(addr), "we are scanning the CMS generation");
6543   bool is_obj_array = false;
6544   #ifdef ASSERT
6545     if (!_parallel) {
6546       assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
6547       assert(_collector->overflow_list_is_empty(),
6548              "overflow list should be empty");
6549 
6550     }
6551   #endif // ASSERT
6552   if (_bit_map->isMarked(addr)) {
6553     // Obj arrays are precisely marked, non-arrays are not;
6554     // so we scan objArrays precisely and non-arrays in their
6555     // entirety.
6556     if (p->is_objArray()) {
6557       is_obj_array = true;
6558       if (_parallel) {
6559         p->oop_iterate(_par_scan_closure, mr);
6560       } else {
6561         p->oop_iterate(_scan_closure, mr);
6562       }
6563     } else {
6564       if (_parallel) {
6565         p->oop_iterate(_par_scan_closure);
6566       } else {
6567         p->oop_iterate(_scan_closure);
6568       }
6569     }
6570   }
6571   #ifdef ASSERT
6572     if (!_parallel) {
6573       assert(_mark_stack->isEmpty(), "post-condition (eager drainage)");
6574       assert(_collector->overflow_list_is_empty(),
6575              "overflow list should be empty");
6576 
6577     }
6578   #endif // ASSERT
6579   return is_obj_array;
6580 }
6581 
6582 MarkFromRootsClosure::MarkFromRootsClosure(CMSCollector* collector,
6583                         MemRegion span,
6584                         CMSBitMap* bitMap, CMSMarkStack*  markStack,
6585                         bool should_yield, bool verifying):
6586   _collector(collector),
6587   _span(span),
6588   _bitMap(bitMap),
6589   _mut(&collector->_modUnionTable),
6590   _markStack(markStack),
6591   _yield(should_yield),
6592   _skipBits(0)
6593 {
6594   assert(_markStack->isEmpty(), "stack should be empty");
6595   _finger = _bitMap->startWord();
6596   _threshold = _finger;
6597   assert(_collector->_restart_addr == NULL, "Sanity check");
6598   assert(_span.contains(_finger), "Out of bounds _finger?");
6599   DEBUG_ONLY(_verifying = verifying;)
6600 }
6601 
6602 void MarkFromRootsClosure::reset(HeapWord* addr) {
6603   assert(_markStack->isEmpty(), "would cause duplicates on stack");
6604   assert(_span.contains(addr), "Out of bounds _finger?");
6605   _finger = addr;
6606   _threshold = (HeapWord*)round_to(
6607                  (intptr_t)_finger, CardTableModRefBS::card_size);
6608 }
6609 
6610 // Should revisit to see if this should be restructured for
6611 // greater efficiency.
6612 bool MarkFromRootsClosure::do_bit(size_t offset) {
6613   if (_skipBits > 0) {
6614     _skipBits--;
6615     return true;
6616   }
6617   // convert offset into a HeapWord*
6618   HeapWord* addr = _bitMap->startWord() + offset;
6619   assert(_bitMap->endWord() && addr < _bitMap->endWord(),
6620          "address out of range");
6621   assert(_bitMap->isMarked(addr), "tautology");
6622   if (_bitMap->isMarked(addr+1)) {
6623     // this is an allocated but not yet initialized object
6624     assert(_skipBits == 0, "tautology");
6625     _skipBits = 2;  // skip next two marked bits ("Printezis-marks")
6626     oop p = oop(addr);
6627     if (p->klass_or_null() == NULL) {
6628       DEBUG_ONLY(if (!_verifying) {)
6629         // We re-dirty the cards on which this object lies and increase
6630         // the _threshold so that we'll come back to scan this object
6631         // during the preclean or remark phase. (CMSCleanOnEnter)
6632         if (CMSCleanOnEnter) {
6633           size_t sz = _collector->block_size_using_printezis_bits(addr);
6634           HeapWord* end_card_addr   = (HeapWord*)round_to(
6635                                          (intptr_t)(addr+sz), CardTableModRefBS::card_size);
6636           MemRegion redirty_range = MemRegion(addr, end_card_addr);
6637           assert(!redirty_range.is_empty(), "Arithmetical tautology");
6638           // Bump _threshold to end_card_addr; note that
6639           // _threshold cannot possibly exceed end_card_addr, anyhow.
6640           // This prevents future clearing of the card as the scan proceeds
6641           // to the right.
6642           assert(_threshold <= end_card_addr,
6643                  "Because we are just scanning into this object");
6644           if (_threshold < end_card_addr) {
6645             _threshold = end_card_addr;
6646           }
6647           if (p->klass_or_null() != NULL) {
6648             // Redirty the range of cards...
6649             _mut->mark_range(redirty_range);
6650           } // ...else the setting of klass will dirty the card anyway.
6651         }
6652       DEBUG_ONLY(})
6653       return true;
6654     }
6655   }
6656   scanOopsInOop(addr);
6657   return true;
6658 }
6659 
6660 // We take a break if we've been at this for a while,
6661 // so as to avoid monopolizing the locks involved.
6662 void MarkFromRootsClosure::do_yield_work() {
6663   // First give up the locks, then yield, then re-lock
6664   // We should probably use a constructor/destructor idiom to
6665   // do this unlock/lock or modify the MutexUnlocker class to
6666   // serve our purpose. XXX
6667   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6668          "CMS thread should hold CMS token");
6669   assert_lock_strong(_bitMap->lock());
6670   _bitMap->lock()->unlock();
6671   ConcurrentMarkSweepThread::desynchronize(true);
6672   _collector->stopTimer();
6673   if (PrintCMSStatistics != 0) {
6674     _collector->incrementYields();
6675   }
6676 
6677   // See the comment in coordinator_yield()
6678   for (unsigned i = 0; i < CMSYieldSleepCount &&
6679                        ConcurrentMarkSweepThread::should_yield() &&
6680                        !CMSCollector::foregroundGCIsActive(); ++i) {
6681     os::sleep(Thread::current(), 1, false);
6682   }
6683 
6684   ConcurrentMarkSweepThread::synchronize(true);
6685   _bitMap->lock()->lock_without_safepoint_check();
6686   _collector->startTimer();
6687 }
6688 
6689 void MarkFromRootsClosure::scanOopsInOop(HeapWord* ptr) {
6690   assert(_bitMap->isMarked(ptr), "expected bit to be set");
6691   assert(_markStack->isEmpty(),
6692          "should drain stack to limit stack usage");
6693   // convert ptr to an oop preparatory to scanning
6694   oop obj = oop(ptr);
6695   // Ignore mark word in verification below, since we
6696   // may be running concurrent with mutators.
6697   assert(obj->is_oop(true), "should be an oop");
6698   assert(_finger <= ptr, "_finger runneth ahead");
6699   // advance the finger to right end of this object
6700   _finger = ptr + obj->size();
6701   assert(_finger > ptr, "we just incremented it above");
6702   // On large heaps, it may take us some time to get through
6703   // the marking phase. During
6704   // this time it's possible that a lot of mutations have
6705   // accumulated in the card table and the mod union table --
6706   // these mutation records are redundant until we have
6707   // actually traced into the corresponding card.
6708   // Here, we check whether advancing the finger would make
6709   // us cross into a new card, and if so clear corresponding
6710   // cards in the MUT (preclean them in the card-table in the
6711   // future).
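       // A hedged illustration of the clearing below (numbers invented; the
       // real granularity is CardTableModRefBS::card_size): suppose a card
       // covers 512 bytes, _threshold currently sits at card boundary C, and
       // the object just scanned advances _finger into the middle of card
       // C+3. round_to() then bumps _threshold up to the boundary of card
       // C+4, and the mod union table entries for cards C through C+3 are
       // cleared, since we have now traced directly into that range.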
6712 
6713   DEBUG_ONLY(if (!_verifying) {)
6714     // The clean-on-enter optimization is disabled by default,
6715     // until we fix 6178663.
6716     if (CMSCleanOnEnter && (_finger > _threshold)) {
6717       // [_threshold, _finger) represents the interval
6718     // of cards to be cleared in MUT (or precleaned in card table).
6719       // The set of cards to be cleared is all those that overlap
6720       // with the interval [_threshold, _finger); note that
6721       // _threshold is always kept card-aligned but _finger isn't
6722       // always card-aligned.
6723       HeapWord* old_threshold = _threshold;
6724       assert(old_threshold == (HeapWord*)round_to(
6725               (intptr_t)old_threshold, CardTableModRefBS::card_size),
6726              "_threshold should always be card-aligned");
6727       _threshold = (HeapWord*)round_to(
6728                      (intptr_t)_finger, CardTableModRefBS::card_size);
6729       MemRegion mr(old_threshold, _threshold);
6730       assert(!mr.is_empty(), "Control point invariant");
6731       assert(_span.contains(mr), "Should clear within span");
6732       _mut->clear_range(mr);
6733     }
6734   DEBUG_ONLY(})
6735   // Note: the finger doesn't advance while we drain
6736   // the stack below.
6737   PushOrMarkClosure pushOrMarkClosure(_collector,
6738                                       _span, _bitMap, _markStack,
6739                                       _finger, this);
6740   bool res = _markStack->push(obj);
6741   assert(res, "Empty non-zero size stack should have space for single push");
6742   while (!_markStack->isEmpty()) {
6743     oop new_oop = _markStack->pop();
6744     // Skip verifying header mark word below because we are
6745     // running concurrent with mutators.
6746     assert(new_oop->is_oop(true), "Oops! expected to pop an oop");
6747     // now scan this oop's oops
6748     new_oop->oop_iterate(&pushOrMarkClosure);
6749     do_yield_check();
6750   }
6751   assert(_markStack->isEmpty(), "tautology, emphasizing post-condition");
6752 }
6753 
6754 Par_MarkFromRootsClosure::Par_MarkFromRootsClosure(CMSConcMarkingTask* task,
6755                        CMSCollector* collector, MemRegion span,
6756                        CMSBitMap* bit_map,
6757                        OopTaskQueue* work_queue,
6758                        CMSMarkStack*  overflow_stack):
6759   _collector(collector),
6760   _whole_span(collector->_span),
6761   _span(span),
6762   _bit_map(bit_map),
6763   _mut(&collector->_modUnionTable),
6764   _work_queue(work_queue),
6765   _overflow_stack(overflow_stack),
6766   _skip_bits(0),
6767   _task(task)
6768 {
6769   assert(_work_queue->size() == 0, "work_queue should be empty");
6770   _finger = span.start();
6771   _threshold = _finger;     // XXX Defer clear-on-enter optimization for now
6772   assert(_span.contains(_finger), "Out of bounds _finger?");
6773 }
6774 
6775 // Should revisit to see if this should be restructured for
6776 // greater efficiency.
6777 bool Par_MarkFromRootsClosure::do_bit(size_t offset) {
6778   if (_skip_bits > 0) {
6779     _skip_bits--;
6780     return true;
6781   }
6782   // convert offset into a HeapWord*
6783   HeapWord* addr = _bit_map->startWord() + offset;
6784   assert(_bit_map->endWord() && addr < _bit_map->endWord(),
6785          "address out of range");
6786   assert(_bit_map->isMarked(addr), "tautology");
6787   if (_bit_map->isMarked(addr+1)) {
6788     // this is an allocated object that might not yet be initialized
6789     assert(_skip_bits == 0, "tautology");
6790     _skip_bits = 2;  // skip next two marked bits ("Printezis-marks")
6791     oop p = oop(addr);
6792     if (p->klass_or_null() == NULL) {
6793       // In the case of the clean-on-enter optimization, redirty the card
6794       // and avoid clearing the card by increasing the threshold.
6795       return true;
6796     }
6797   }
6798   scan_oops_in_oop(addr);
6799   return true;
6800 }
6801 
6802 void Par_MarkFromRootsClosure::scan_oops_in_oop(HeapWord* ptr) {
6803   assert(_bit_map->isMarked(ptr), "expected bit to be set");
6804   // Should we assert that our work queue is empty or
6805   // below some drain limit?
6806   assert(_work_queue->size() == 0,
6807          "should drain stack to limit stack usage");
6808   // convert ptr to an oop preparatory to scanning
6809   oop obj = oop(ptr);
6810   // Ignore mark word in verification below, since we
6811   // may be running concurrent with mutators.
6812   assert(obj->is_oop(true), "should be an oop");
6813   assert(_finger <= ptr, "_finger runneth ahead");
6814   // advance the finger to right end of this object
6815   _finger = ptr + obj->size();
6816   assert(_finger > ptr, "we just incremented it above");
6817   // On large heaps, it may take us some time to get through
6818   // the marking phase. During
6819   // this time it's possible that a lot of mutations have
6820   // accumulated in the card table and the mod union table --
6821   // these mutation records are redundant until we have
6822   // actually traced into the corresponding card.
6823   // Here, we check whether advancing the finger would make
6824   // us cross into a new card, and if so clear corresponding
6825   // cards in the MUT (preclean them in the card-table in the
6826   // future).
6827 
6828   // The clean-on-enter optimization is disabled by default,
6829   // until we fix 6178663.
6830   if (CMSCleanOnEnter && (_finger > _threshold)) {
6831     // [_threshold, _finger) represents the interval
6832   // of cards to be cleared in MUT (or precleaned in card table).
6833     // The set of cards to be cleared is all those that overlap
6834     // with the interval [_threshold, _finger); note that
6835     // _threshold is always kept card-aligned but _finger isn't
6836     // always card-aligned.
6837     HeapWord* old_threshold = _threshold;
6838     assert(old_threshold == (HeapWord*)round_to(
6839             (intptr_t)old_threshold, CardTableModRefBS::card_size),
6840            "_threshold should always be card-aligned");
6841     _threshold = (HeapWord*)round_to(
6842                    (intptr_t)_finger, CardTableModRefBS::card_size);
6843     MemRegion mr(old_threshold, _threshold);
6844     assert(!mr.is_empty(), "Control point invariant");
6845     assert(_span.contains(mr), "Should clear within span"); // _whole_span ??
6846     _mut->clear_range(mr);
6847   }
6848 
6849   // Note: the local finger doesn't advance while we drain
6850   // the stack below, but the global finger sure can and will.
6851   HeapWord** gfa = _task->global_finger_addr();
6852   Par_PushOrMarkClosure pushOrMarkClosure(_collector,
6853                                       _span, _bit_map,
6854                                       _work_queue,
6855                                       _overflow_stack,
6856                                       _finger,
6857                                       gfa, this);
6858   bool res = _work_queue->push(obj);   // overflow could occur here
6859   assert(res, "Will hold once we use workqueues");
6860   while (true) {
6861     oop new_oop;
6862     if (!_work_queue->pop_local(new_oop)) {
6863       // We emptied our work_queue; check if there's stuff that can
6864       // be gotten from the overflow stack.
6865       if (CMSConcMarkingTask::get_work_from_overflow_stack(
6866             _overflow_stack, _work_queue)) {
6867         do_yield_check();
6868         continue;
6869       } else {  // done
6870         break;
6871       }
6872     }
6873     // Skip verifying header mark word below because we are
6874     // running concurrent with mutators.
6875     assert(new_oop->is_oop(true), "Oops! expected to pop an oop");
6876     // now scan this oop's oops
6877     new_oop->oop_iterate(&pushOrMarkClosure);
6878     do_yield_check();
6879   }
6880   assert(_work_queue->size() == 0, "tautology, emphasizing post-condition");
6881 }
6882 
6883 // Yield in response to a request from VM Thread or
6884 // from mutators.
6885 void Par_MarkFromRootsClosure::do_yield_work() {
6886   assert(_task != NULL, "sanity");
6887   _task->yield();
6888 }
6889 
6890 // A variant of the above used for verifying CMS marking work.
6891 MarkFromRootsVerifyClosure::MarkFromRootsVerifyClosure(CMSCollector* collector,
6892                         MemRegion span,
6893                         CMSBitMap* verification_bm, CMSBitMap* cms_bm,
6894                         CMSMarkStack*  mark_stack):
6895   _collector(collector),
6896   _span(span),
6897   _verification_bm(verification_bm),
6898   _cms_bm(cms_bm),
6899   _mark_stack(mark_stack),
6900   _pam_verify_closure(collector, span, verification_bm, cms_bm,
6901                       mark_stack)
6902 {
6903   assert(_mark_stack->isEmpty(), "stack should be empty");
6904   _finger = _verification_bm->startWord();
6905   assert(_collector->_restart_addr == NULL, "Sanity check");
6906   assert(_span.contains(_finger), "Out of bounds _finger?");
6907 }
6908 
6909 void MarkFromRootsVerifyClosure::reset(HeapWord* addr) {
6910   assert(_mark_stack->isEmpty(), "would cause duplicates on stack");
6911   assert(_span.contains(addr), "Out of bounds _finger?");
6912   _finger = addr;
6913 }
6914 
6915 // Should revisit to see if this should be restructured for
6916 // greater efficiency.
6917 bool MarkFromRootsVerifyClosure::do_bit(size_t offset) {
6918   // convert offset into a HeapWord*
6919   HeapWord* addr = _verification_bm->startWord() + offset;
6920   assert(_verification_bm->endWord() && addr < _verification_bm->endWord(),
6921          "address out of range");
6922   assert(_verification_bm->isMarked(addr), "tautology");
6923   assert(_cms_bm->isMarked(addr), "tautology");
6924 
6925   assert(_mark_stack->isEmpty(),
6926          "should drain stack to limit stack usage");
6927   // convert addr to an oop preparatory to scanning
6928   oop obj = oop(addr);
6929   assert(obj->is_oop(), "should be an oop");
6930   assert(_finger <= addr, "_finger runneth ahead");
6931   // advance the finger to right end of this object
6932   _finger = addr + obj->size();
6933   assert(_finger > addr, "we just incremented it above");
6934   // Note: the finger doesn't advance while we drain
6935   // the stack below.
6936   bool res = _mark_stack->push(obj);
6937   assert(res, "Empty non-zero size stack should have space for single push");
6938   while (!_mark_stack->isEmpty()) {
6939     oop new_oop = _mark_stack->pop();
6940     assert(new_oop->is_oop(), "Oops! expected to pop an oop");
6941     // now scan this oop's oops
6942     new_oop->oop_iterate(&_pam_verify_closure);
6943   }
6944   assert(_mark_stack->isEmpty(), "tautology, emphasizing post-condition");
6945   return true;
6946 }
6947 
6948 PushAndMarkVerifyClosure::PushAndMarkVerifyClosure(
6949   CMSCollector* collector, MemRegion span,
6950   CMSBitMap* verification_bm, CMSBitMap* cms_bm,
6951   CMSMarkStack*  mark_stack):
6952   MetadataAwareOopClosure(collector->ref_processor()),
6953   _collector(collector),
6954   _span(span),
6955   _verification_bm(verification_bm),
6956   _cms_bm(cms_bm),
6957   _mark_stack(mark_stack)
6958 { }
6959 
6960 void PushAndMarkVerifyClosure::do_oop(oop* p)       { PushAndMarkVerifyClosure::do_oop_work(p); }
6961 void PushAndMarkVerifyClosure::do_oop(narrowOop* p) { PushAndMarkVerifyClosure::do_oop_work(p); }
6962 
6963 // Upon stack overflow, we discard (part of) the stack,
6964 // remembering the least address amongst those discarded
6965 // in CMSCollector's _restart_addr.
6966 void PushAndMarkVerifyClosure::handle_stack_overflow(HeapWord* lost) {
6967   // Remember the least grey address discarded
6968   HeapWord* ra = (HeapWord*)_mark_stack->least_value(lost);
6969   _collector->lower_restart_addr(ra);
6970   _mark_stack->reset();  // discard stack contents
6971   _mark_stack->expand(); // expand the stack if possible
6972 }
6973 
6974 void PushAndMarkVerifyClosure::do_oop(oop obj) {
6975   assert(obj->is_oop_or_null(), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(obj)));
6976   HeapWord* addr = (HeapWord*)obj;
6977   if (_span.contains(addr) && !_verification_bm->isMarked(addr)) {
6978     // Oop lies in _span and isn't yet grey or black
6979     _verification_bm->mark(addr);            // now grey
6980     if (!_cms_bm->isMarked(addr)) {
6981       oop(addr)->print();
6982       gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)",
6983                              addr);
6984       fatal("... aborting");
6985     }
6986 
6987     if (!_mark_stack->push(obj)) { // stack overflow
6988       if (PrintCMSStatistics != 0) {
6989         gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
6990                                SIZE_FORMAT, _mark_stack->capacity());
6991       }
6992       assert(_mark_stack->isFull(), "Else push should have succeeded");
6993       handle_stack_overflow(addr);
6994     }
6995     // anything including and to the right of _finger
6996     // will be scanned as we iterate over the remainder of the
6997     // bit map
6998   }
6999 }
7000 
7001 PushOrMarkClosure::PushOrMarkClosure(CMSCollector* collector,
7002                      MemRegion span,
7003                      CMSBitMap* bitMap, CMSMarkStack*  markStack,
7004                      HeapWord* finger, MarkFromRootsClosure* parent) :
7005   MetadataAwareOopClosure(collector->ref_processor()),
7006   _collector(collector),
7007   _span(span),
7008   _bitMap(bitMap),
7009   _markStack(markStack),
7010   _finger(finger),
7011   _parent(parent)
7012 { }
7013 
7014 Par_PushOrMarkClosure::Par_PushOrMarkClosure(CMSCollector* collector,
7015                      MemRegion span,
7016                      CMSBitMap* bit_map,
7017                      OopTaskQueue* work_queue,
7018                      CMSMarkStack*  overflow_stack,
7019                      HeapWord* finger,
7020                      HeapWord** global_finger_addr,
7021                      Par_MarkFromRootsClosure* parent) :
7022   MetadataAwareOopClosure(collector->ref_processor()),
7023   _collector(collector),
7024   _whole_span(collector->_span),
7025   _span(span),
7026   _bit_map(bit_map),
7027   _work_queue(work_queue),
7028   _overflow_stack(overflow_stack),
7029   _finger(finger),
7030   _global_finger_addr(global_finger_addr),
7031   _parent(parent)
7032 { }
7033 
7034 // Assumes thread-safe access by callers, who are
7035 // responsible for mutual exclusion.
7036 void CMSCollector::lower_restart_addr(HeapWord* low) {
7037   assert(_span.contains(low), "Out of bounds addr");
7038   if (_restart_addr == NULL) {
7039     _restart_addr = low;
7040   } else {
7041     _restart_addr = MIN2(_restart_addr, low);
7042   }
7043 }
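     // Editorial note (a sketch inferred from the surrounding code, not an
     // authoritative statement): _restart_addr records the lowest address
     // whose pending work was discarded on a stack or queue overflow. The
     // affected objects remain marked (grey) in the bit map, so the collector
     // can later resume the bit-map walk from _restart_addr -- cf. the
     // reset() methods of MarkFromRootsClosure and MarkFromRootsVerifyClosure
     // above -- and re-derive the dropped work.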
7044 
7045 // Upon stack overflow, we discard (part of) the stack,
7046 // remembering the least address amongst those discarded
7047 // in CMSCollector's _restart_addr.
7048 void PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
7049   // Remember the least grey address discarded
7050   HeapWord* ra = (HeapWord*)_markStack->least_value(lost);
7051   _collector->lower_restart_addr(ra);
7052   _markStack->reset();  // discard stack contents
7053   _markStack->expand(); // expand the stack if possible
7054 }
7055 
7056 // Upon stack overflow, we discard (part of) the stack,
7057 // remembering the least address amongst those discarded
7058 // in CMSCollector's _restart_addr.
7059 void Par_PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
7060   // We need to do this under a mutex to prevent other
7061   // workers from interfering with the work done below.
7062   MutexLockerEx ml(_overflow_stack->par_lock(),
7063                    Mutex::_no_safepoint_check_flag);
7064   // Remember the least grey address discarded
7065   HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost);
7066   _collector->lower_restart_addr(ra);
7067   _overflow_stack->reset();  // discard stack contents
7068   _overflow_stack->expand(); // expand the stack if possible
7069 }
7070 
7071 void PushOrMarkClosure::do_oop(oop obj) {
7072   // Ignore mark word because we are running concurrent with mutators.
7073   assert(obj->is_oop_or_null(true), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(obj)));
7074   HeapWord* addr = (HeapWord*)obj;
7075   if (_span.contains(addr) && !_bitMap->isMarked(addr)) {
7076     // Oop lies in _span and isn't yet grey or black
7077     _bitMap->mark(addr);            // now grey
7078     if (addr < _finger) {
7079       // the bit map iteration has already either passed, or
7080       // sampled, this bit in the bit map; we'll need to
7081       // use the marking stack to scan this oop's oops.
7082       bool simulate_overflow = false;
7083       NOT_PRODUCT(
7084         if (CMSMarkStackOverflowALot &&
7085             _collector->simulate_overflow()) {
7086           // simulate a stack overflow
7087           simulate_overflow = true;
7088         }
7089       )
7090       if (simulate_overflow || !_markStack->push(obj)) { // stack overflow
7091         if (PrintCMSStatistics != 0) {
7092           gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
7093                                  SIZE_FORMAT, _markStack->capacity());
7094         }
7095         assert(simulate_overflow || _markStack->isFull(), "Else push should have succeeded");
7096         handle_stack_overflow(addr);
7097       }
7098     }
7099     // anything including and to the right of _finger
7100     // will be scanned as we iterate over the remainder of the
7101     // bit map
7102     do_yield_check();
7103   }
7104 }
7105 
7106 void PushOrMarkClosure::do_oop(oop* p)       { PushOrMarkClosure::do_oop_work(p); }
7107 void PushOrMarkClosure::do_oop(narrowOop* p) { PushOrMarkClosure::do_oop_work(p); }
7108 
7109 void Par_PushOrMarkClosure::do_oop(oop obj) {
7110   // Ignore mark word because we are running concurrent with mutators.
7111   assert(obj->is_oop_or_null(true), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(obj)));
7112   HeapWord* addr = (HeapWord*)obj;
7113   if (_whole_span.contains(addr) && !_bit_map->isMarked(addr)) {
7114     // Oop lies in _whole_span and isn't yet grey or black
7115     // We read the global_finger (volatile read) strictly after marking the oop
7116     bool res = _bit_map->par_mark(addr);    // now grey
7117     volatile HeapWord** gfa = (volatile HeapWord**)_global_finger_addr;
7118     // Should we push this marked oop on our stack?
7119     // -- if someone else marked it, nothing to do
7120     // -- if target oop is above global finger nothing to do
7121     // -- if target oop is in chunk and above local finger
7122     //      then nothing to do
7123     // -- else push on work queue
7124     if (   !res       // someone else marked it, they will deal with it
7125         || (addr >= *gfa)  // will be scanned in a later task
7126         || (_span.contains(addr) && addr >= _finger)) { // later in this chunk
7127       return;
7128     }
7129     // the bit map iteration has already either passed, or
7130     // sampled, this bit in the bit map; we'll need to
7131     // use the marking stack to scan this oop's oops.
7132     bool simulate_overflow = false;
7133     NOT_PRODUCT(
7134       if (CMSMarkStackOverflowALot &&
7135           _collector->simulate_overflow()) {
7136         // simulate a stack overflow
7137         simulate_overflow = true;
7138       }
7139     )
7140     if (simulate_overflow ||
7141         !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
7142       // stack overflow
7143       if (PrintCMSStatistics != 0) {
7144         gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
7145                                SIZE_FORMAT, _overflow_stack->capacity());
7146       }
7147       // We cannot assert that the overflow stack is full because
7148       // it may have been emptied since.
7149       assert(simulate_overflow ||
7150              _work_queue->size() == _work_queue->max_elems(),
7151             "Else push should have succeeded");
7152       handle_stack_overflow(addr);
7153     }
7154     do_yield_check();
7155   }
7156 }
7157 
7158 void Par_PushOrMarkClosure::do_oop(oop* p)       { Par_PushOrMarkClosure::do_oop_work(p); }
7159 void Par_PushOrMarkClosure::do_oop(narrowOop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
7160 
7161 PushAndMarkClosure::PushAndMarkClosure(CMSCollector* collector,
7162                                        MemRegion span,
7163                                        ReferenceProcessor* rp,
7164                                        CMSBitMap* bit_map,
7165                                        CMSBitMap* mod_union_table,
7166                                        CMSMarkStack*  mark_stack,
7167                                        bool           concurrent_precleaning):
7168   MetadataAwareOopClosure(rp),
7169   _collector(collector),
7170   _span(span),
7171   _bit_map(bit_map),
7172   _mod_union_table(mod_union_table),
7173   _mark_stack(mark_stack),
7174   _concurrent_precleaning(concurrent_precleaning)
7175 {
7176   assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
7177 }
7178 
7179 // Grey object rescan during pre-cleaning and second checkpoint phases --
7180 // the non-parallel version (the parallel version appears further below.)
7181 void PushAndMarkClosure::do_oop(oop obj) {
7182   // Ignore mark word verification. During concurrent precleaning,
7183   // the object's monitor may be locked; during the checkpoint
7184   // phases, the object may already have been reached by a different
7185   // path and may be at the end of the global overflow list (so
7186   // its mark word may be NULL).
7187   assert(obj->is_oop_or_null(true /* ignore mark word */),
7188          err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(obj)));
7189   HeapWord* addr = (HeapWord*)obj;
7190   // Check if oop points into the CMS generation
7191   // and is not marked
7192   if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
7193     // a white object ...
7194     _bit_map->mark(addr);         // ... now grey
7195     // push on the marking stack (grey set)
7196     bool simulate_overflow = false;
7197     NOT_PRODUCT(
7198       if (CMSMarkStackOverflowALot &&
7199           _collector->simulate_overflow()) {
7200         // simulate a stack overflow
7201         simulate_overflow = true;
7202       }
7203     )
7204     if (simulate_overflow || !_mark_stack->push(obj)) {
7205       if (_concurrent_precleaning) {
7206          // During precleaning we can just dirty the appropriate card(s)
7207          // in the mod union table and continue, thus ensuring that the
7208          // object remains in the grey set. In the case of object arrays
7209          // we need to dirty all of the cards that the object spans,
7210          // since the rescan of object arrays will be limited to the
7211          // dirty cards.
7212          // Note that no one can be interfering with us in this action
7213          // of dirtying the mod union table, so no locking or atomics
7214          // are required.
7215          if (obj->is_objArray()) {
7216            size_t sz = obj->size();
7217            HeapWord* end_card_addr = (HeapWord*)round_to(
7218                                         (intptr_t)(addr+sz), CardTableModRefBS::card_size);
7219            MemRegion redirty_range = MemRegion(addr, end_card_addr);
7220            assert(!redirty_range.is_empty(), "Arithmetical tautology");
7221            _mod_union_table->mark_range(redirty_range);
7222          } else {
7223            _mod_union_table->mark(addr);
7224          }
7225          _collector->_ser_pmc_preclean_ovflw++;
7226       } else {
7227          // During the remark phase, we need to remember this oop
7228          // in the overflow list.
7229          _collector->push_on_overflow_list(obj);
7230          _collector->_ser_pmc_remark_ovflw++;
7231       }
7232     }
7233   }
7234 }
7235 
7236 Par_PushAndMarkClosure::Par_PushAndMarkClosure(CMSCollector* collector,
7237                                                MemRegion span,
7238                                                ReferenceProcessor* rp,
7239                                                CMSBitMap* bit_map,
7240                                                OopTaskQueue* work_queue):
7241   MetadataAwareOopClosure(rp),
7242   _collector(collector),
7243   _span(span),
7244   _bit_map(bit_map),
7245   _work_queue(work_queue)
7246 {
7247   assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
7248 }
7249 
7250 void PushAndMarkClosure::do_oop(oop* p)       { PushAndMarkClosure::do_oop_work(p); }
7251 void PushAndMarkClosure::do_oop(narrowOop* p) { PushAndMarkClosure::do_oop_work(p); }
7252 
7253 // Grey object rescan during second checkpoint phase --
7254 // the parallel version.
7255 void Par_PushAndMarkClosure::do_oop(oop obj) {
7256   // In the assert below, we ignore the mark word because
7257   // this oop may point to an already visited object that is
7258   // on the overflow stack (in which case the mark word has
7259   // been hijacked for chaining into the overflow stack --
7260   // if this is the last object in the overflow stack then
7261   // its mark word will be NULL). Because this object may
7262   // have been subsequently popped off the global overflow
7263   // stack, and the mark word possibly restored to the prototypical
7264   // value, by the time we get to examine this failing assert in
7265   // the debugger, is_oop_or_null(false) may subsequently start
7266   // to hold.
7267   assert(obj->is_oop_or_null(true),
7268          err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(obj)));
7269   HeapWord* addr = (HeapWord*)obj;
7270   // Check if oop points into the CMS generation
7271   // and is not marked
7272   if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
7273     // a white object ...
7274     // If we manage to "claim" the object, by being the
7275     // first thread to mark it, then we push it on our
7276     // marking stack
7277     if (_bit_map->par_mark(addr)) {     // ... now grey
7278       // push on work queue (grey set)
7279       bool simulate_overflow = false;
7280       NOT_PRODUCT(
7281         if (CMSMarkStackOverflowALot &&
7282             _collector->par_simulate_overflow()) {
7283           // simulate a stack overflow
7284           simulate_overflow = true;
7285         }
7286       )
7287       if (simulate_overflow || !_work_queue->push(obj)) {
7288         _collector->par_push_on_overflow_list(obj);
7289         _collector->_par_pmc_remark_ovflw++; //  imprecise OK: no need to CAS
7290       }
7291     } // Else, some other thread got there first
7292   }
7293 }
7294 
7295 void Par_PushAndMarkClosure::do_oop(oop* p)       { Par_PushAndMarkClosure::do_oop_work(p); }
7296 void Par_PushAndMarkClosure::do_oop(narrowOop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
7297 
7298 void CMSPrecleanRefsYieldClosure::do_yield_work() {
7299   Mutex* bml = _collector->bitMapLock();
7300   assert_lock_strong(bml);
7301   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
7302          "CMS thread should hold CMS token");
7303 
7304   bml->unlock();
7305   ConcurrentMarkSweepThread::desynchronize(true);
7306 
7307   _collector->stopTimer();
7308   if (PrintCMSStatistics != 0) {
7309     _collector->incrementYields();
7310   }
7311 
7312   // See the comment in coordinator_yield()
7313   for (unsigned i = 0; i < CMSYieldSleepCount &&
7314                        ConcurrentMarkSweepThread::should_yield() &&
7315                        !CMSCollector::foregroundGCIsActive(); ++i) {
7316     os::sleep(Thread::current(), 1, false);
7317   }
7318 
7319   ConcurrentMarkSweepThread::synchronize(true);
7320   bml->lock();
7321 
7322   _collector->startTimer();
7323 }
7324 
7325 bool CMSPrecleanRefsYieldClosure::should_return() {
7326   if (ConcurrentMarkSweepThread::should_yield()) {
7327     do_yield_work();
7328   }
7329   return _collector->foregroundGCIsActive();
7330 }
7331 
7332 void MarkFromDirtyCardsClosure::do_MemRegion(MemRegion mr) {
7333   assert(((size_t)mr.start())%CardTableModRefBS::card_size_in_words == 0,
7334          "mr should be aligned to start at a card boundary");
7335   // We'd like to assert:
7336   // assert(mr.word_size()%CardTableModRefBS::card_size_in_words == 0,
7337   //        "mr should be a range of cards");
7338   // However, that would be too strong in one case -- the last
7339   // partition ends at _unallocated_block which, in general, can be
7340   // an arbitrary boundary, not necessarily card aligned.
7341   if (PrintCMSStatistics != 0) {
7342     _num_dirty_cards +=
7343          mr.word_size()/CardTableModRefBS::card_size_in_words;
7344   }
7345   _space->object_iterate_mem(mr, &_scan_cl);
7346 }
7347 
7348 SweepClosure::SweepClosure(CMSCollector* collector,
7349                            ConcurrentMarkSweepGeneration* g,
7350                            CMSBitMap* bitMap, bool should_yield) :
7351   _collector(collector),
7352   _g(g),
7353   _sp(g->cmsSpace()),
7354   _limit(_sp->sweep_limit()),
7355   _freelistLock(_sp->freelistLock()),
7356   _bitMap(bitMap),
7357   _yield(should_yield),
7358   _inFreeRange(false),           // No free range at beginning of sweep
7359   _freeRangeInFreeLists(false),  // No free range at beginning of sweep
7360   _lastFreeRangeCoalesced(false),
7361   _freeFinger(g->used_region().start())
7362 {
7363   NOT_PRODUCT(
7364     _numObjectsFreed = 0;
7365     _numWordsFreed   = 0;
7366     _numObjectsLive = 0;
7367     _numWordsLive = 0;
7368     _numObjectsAlreadyFree = 0;
7369     _numWordsAlreadyFree = 0;
7370     _last_fc = NULL;
7371 
7372     _sp->initializeIndexedFreeListArrayReturnedBytes();
7373     _sp->dictionary()->initialize_dict_returned_bytes();
7374   )
7375   assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
7376          "sweep _limit out of bounds");
7377   if (CMSTraceSweeper) {
7378     gclog_or_tty->print_cr("\n====================\nStarting new sweep with limit " PTR_FORMAT,
7379                         _limit);
7380   }
7381 }
7382 
7383 void SweepClosure::print_on(outputStream* st) const {
7384   st->print_cr("_sp = [" PTR_FORMAT "," PTR_FORMAT ")",
7385                _sp->bottom(), _sp->end());
7386   st->print_cr("_limit = " PTR_FORMAT, _limit);
7387   st->print_cr("_freeFinger = " PTR_FORMAT, _freeFinger);
7388   NOT_PRODUCT(st->print_cr("_last_fc = " PTR_FORMAT, _last_fc);)
7389   st->print_cr("_inFreeRange = %d, _freeRangeInFreeLists = %d, _lastFreeRangeCoalesced = %d",
7390                _inFreeRange, _freeRangeInFreeLists, _lastFreeRangeCoalesced);
7391 }
7392 
7393 #ifndef PRODUCT
7394 // Assertion checking only:  no useful work in product mode --
7395 // however, if any of the flags below become product flags,
7396 // you may need to review this code to see if it needs to be
7397 // enabled in product mode.
7398 SweepClosure::~SweepClosure() {
7399   assert_lock_strong(_freelistLock);
7400   assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
7401          "sweep _limit out of bounds");
7402   if (inFreeRange()) {
7403     warning("inFreeRange() should have been reset; dumping state of SweepClosure");
7404     print();
7405     ShouldNotReachHere();
7406   }
7407   if (Verbose && PrintGC) {
7408     gclog_or_tty->print("Collected "SIZE_FORMAT" objects, " SIZE_FORMAT " bytes",
7409                         _numObjectsFreed, _numWordsFreed*sizeof(HeapWord));
7410     gclog_or_tty->print_cr("\nLive "SIZE_FORMAT" objects,  "
7411                            SIZE_FORMAT" bytes  "
7412       "Already free "SIZE_FORMAT" objects, "SIZE_FORMAT" bytes",
7413       _numObjectsLive, _numWordsLive*sizeof(HeapWord),
7414       _numObjectsAlreadyFree, _numWordsAlreadyFree*sizeof(HeapWord));
7415     size_t totalBytes = (_numWordsFreed + _numWordsLive + _numWordsAlreadyFree)
7416                         * sizeof(HeapWord);
7417     gclog_or_tty->print_cr("Total sweep: "SIZE_FORMAT" bytes", totalBytes);
7418 
7419     if (PrintCMSStatistics && CMSVerifyReturnedBytes) {
7420       size_t indexListReturnedBytes = _sp->sumIndexedFreeListArrayReturnedBytes();
7421       size_t dict_returned_bytes = _sp->dictionary()->sum_dict_returned_bytes();
7422       size_t returned_bytes = indexListReturnedBytes + dict_returned_bytes;
7423       gclog_or_tty->print("Returned "SIZE_FORMAT" bytes", returned_bytes);
7424       gclog_or_tty->print("   Indexed List Returned "SIZE_FORMAT" bytes",
7425         indexListReturnedBytes);
7426       gclog_or_tty->print_cr("        Dictionary Returned "SIZE_FORMAT" bytes",
7427         dict_returned_bytes);
7428     }
7429   }
7430   if (CMSTraceSweeper) {
7431     gclog_or_tty->print_cr("end of sweep with _limit = " PTR_FORMAT "\n================",
7432                            _limit);
7433   }
7434 }
7435 #endif  // PRODUCT
7436 
7437 void SweepClosure::initialize_free_range(HeapWord* freeFinger,
7438     bool freeRangeInFreeLists) {
7439   if (CMSTraceSweeper) {
7440     gclog_or_tty->print("---- Start free range at " PTR_FORMAT " with free block (%d)\n",
7441                freeFinger, freeRangeInFreeLists);
7442   }
7443   assert(!inFreeRange(), "Trampling existing free range");
7444   set_inFreeRange(true);
7445   set_lastFreeRangeCoalesced(false);
7446 
7447   set_freeFinger(freeFinger);
7448   set_freeRangeInFreeLists(freeRangeInFreeLists);
7449   if (CMSTestInFreeList) {
7450     if (freeRangeInFreeLists) {
7451       FreeChunk* fc = (FreeChunk*) freeFinger;
7452       assert(fc->is_free(), "A chunk on the free list should be free.");
7453       assert(fc->size() > 0, "Free range should have a size");
7454       assert(_sp->verify_chunk_in_free_list(fc), "Chunk is not in free lists");
7455     }
7456   }
7457 }
7458 
7459 // Note that the sweeper runs concurrently with mutators. Thus,
7460 // it is possible for direct allocation in this generation to happen
7461 // in the middle of the sweep. Note that the sweeper also coalesces
7462 // contiguous free blocks. Thus, unless the sweeper and the allocator
7463 // synchronize appropriately, freshly allocated blocks may get swept up.
7464 // This is accomplished by the sweeper locking the free lists while
7465 // it is sweeping. Thus blocks that are determined to be free are
7466 // indeed free. There is however one additional complication:
7467 // blocks that have been allocated since the final checkpoint and
7468 // mark, will not have been marked and so would be treated as
7469 // unreachable and swept up. To prevent this, the allocator marks
7470 // the bit map when allocating during the sweep phase. This leads,
7471 // however, to a further complication -- objects may have been allocated
7472 // but not yet initialized -- in the sense that the header isn't yet
7473 // installed. The sweeper cannot then determine the size of the block
7474 // in order to skip over it. To deal with this case, we use a technique
7475 // (due to Printezis) to encode such uninitialized block sizes in the
7476 // bit map. Since the bit map uses a bit per every HeapWord, but the
7477 // CMS generation has a minimum object size of 3 HeapWords, it follows
7478 // that "normal marks" won't be adjacent in the bit map (there will
7479 // always be at least two 0 bits between successive 1 bits). We make use
7480 // of these "unused" bits to represent uninitialized blocks -- the bit
7481 // corresponding to the start of the uninitialized object and the next
7482 // bit are both set. Finally, a 1 bit marks the end of the object that
7483 // started with the two consecutive 1 bits to indicate its potentially
7484 // uninitialized state.
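     // A worked illustration of that encoding (offsets invented, but
     // consistent with the asserts and the block_size_using_printezis_bits()
     // uses earlier in this file): for an uninitialized block of sz words
     // whose first word corresponds to bit k,
     //   bit k        == 1   (block start, as for any marked object)
     //   bit k+1      == 1   (the second consecutive 1: "size encoded ahead")
     //   bit k+sz-1   == 1   (marks the last word of the block)
     // with every bit strictly between k+1 and k+sz-1 clear. A reader that
     // finds bits k and k+1 both set recovers sz by locating the next set bit
     // at or beyond k+2; the 3-word minimum object size guarantees that
     // ordinarily marked objects never produce two adjacent 1 bits, so the
     // pattern is unambiguous.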
7485 
7486 size_t SweepClosure::do_blk_careful(HeapWord* addr) {
7487   FreeChunk* fc = (FreeChunk*)addr;
7488   size_t res;
7489 
7490   // Check if we are done sweeping. Below we check "addr >= _limit" rather
7491   // than "addr == _limit" because although _limit was a block boundary when
7492   // we started the sweep, it may no longer be one because heap expansion
7493   // may have caused us to coalesce the block ending at the address _limit
7494   // with a newly expanded chunk (this happens when _limit was set to the
7495   // previous _end of the space), so we may have stepped past _limit:
7496   // see the following Zeno-like trail of CRs 6977970, 7008136, 7042740.
7497   if (addr >= _limit) { // we have swept up to or past the limit: finish up
7498     assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
7499            "sweep _limit out of bounds");
7500     assert(addr < _sp->end(), "addr out of bounds");
7501     // Flush any free range we might be holding as a single
7502     // coalesced chunk to the appropriate free list.
7503     if (inFreeRange()) {
7504       assert(freeFinger() >= _sp->bottom() && freeFinger() < _limit,
7505              err_msg("freeFinger() " PTR_FORMAT" is out-of-bounds", freeFinger()));
7506       flush_cur_free_chunk(freeFinger(),
7507                            pointer_delta(addr, freeFinger()));
7508       if (CMSTraceSweeper) {
7509         gclog_or_tty->print("Sweep: last chunk: ");
7510         gclog_or_tty->print("put_free_blk " PTR_FORMAT " ("SIZE_FORMAT") "
7511                    "[coalesced:%d]\n",
7512                    freeFinger(), pointer_delta(addr, freeFinger()),
7513                    lastFreeRangeCoalesced() ? 1 : 0);
7514       }
7515     }
7516 
7517     // help the iterator loop finish
7518     return pointer_delta(_sp->end(), addr);
7519   }
7520 
7521   assert(addr < _limit, "sweep invariant");
7522   // check if we should yield
7523   do_yield_check(addr);
7524   if (fc->is_free()) {
7525     // Chunk that is already free
7526     res = fc->size();
7527     do_already_free_chunk(fc);
7528     debug_only(_sp->verifyFreeLists());
7529     // If we flush the chunk at hand in lookahead_and_flush()
7530     // and it's coalesced with a preceding chunk, then the
7531     // process of "mangling" the payload of the coalesced block
7532     // will cause erasure of the size information from the
7533     // (erstwhile) header of all the coalesced blocks but the
7534     // first, so the first disjunct in the assert will not hold
7535     // in that specific case (in which case the second disjunct
7536     // will hold).
7537     assert(res == fc->size() || ((HeapWord*)fc) + res >= _limit,
7538            "Otherwise the size info doesn't change at this step");
7539     NOT_PRODUCT(
7540       _numObjectsAlreadyFree++;
7541       _numWordsAlreadyFree += res;
7542     )
7543     NOT_PRODUCT(_last_fc = fc;)
7544   } else if (!_bitMap->isMarked(addr)) {
7545     // Chunk is fresh garbage
7546     res = do_garbage_chunk(fc);
7547     debug_only(_sp->verifyFreeLists());
7548     NOT_PRODUCT(
7549       _numObjectsFreed++;
7550       _numWordsFreed += res;
7551     )
7552   } else {
7553     // Chunk that is alive.
7554     res = do_live_chunk(fc);
7555     debug_only(_sp->verifyFreeLists());
7556     NOT_PRODUCT(
7557         _numObjectsLive++;
7558         _numWordsLive += res;
7559     )
7560   }
7561   return res;
7562 }
7563 
7564 // For the smart allocation, record following
7565 //  split deaths - a free chunk is removed from its free list because
7566 //      it is being split into two or more chunks.
7567 //  split birth - a free chunk is being added to its free list because
7568 //      a larger free chunk has been split and resulted in this free chunk.
7569 //  coal death - a free chunk is being removed from its free list because
7570 //      it is being coalesced into a large free chunk.
7571 //  coal birth - a free chunk is being added to its free list because
7572 //      it was created when two or more free chunks where coalesced into
7573 //      this free chunk.
7574 //
7575 // These statistics are used to determine the desired number of free
7576 // chunks of a given size.  The desired number is chosen to be relative
7577 // to the end of a CMS sweep.  The desired number at the end of a sweep
7578 // is the
7579 //      count-at-end-of-previous-sweep (an amount that was enough)
7580 //              - count-at-beginning-of-current-sweep  (the excess)
7581 //              + split-births  (gains in this size during interval)
7582 //              - split-deaths  (demands on this size during interval)
7583 // where the interval is from the end of one sweep to the end of the
7584 // next.
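     // A worked numeric example (figures invented purely for illustration):
     // if the previous sweep ended with 100 free chunks of a given size, the
     // current sweep began with 70 still on that list, and the interval saw
     // 30 split births and 10 split deaths, then the desired count at the end
     // of this sweep is 100 - 70 + 30 - 10 = 50.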
7585 //
7586 // When sweeping the sweeper maintains an accumulated chunk which is
7587 // the chunk that is made up of chunks that have been coalesced.  That
7588 // will be termed the left-hand chunk.  A new chunk of garbage that
7589 // is being considered for coalescing will be referred to as the
7590 // right-hand chunk.
7591 //
7592 // When making a decision on whether to coalesce a right-hand chunk with
7593 // the current left-hand chunk, the current count vs. the desired count
7594 // of the left-hand chunk is considered.  Also if the right-hand chunk
7595 // is near the large chunk at the end of the heap (see
7596 // ConcurrentMarkSweepGeneration::isNearLargestChunk()), then the
7597 // left-hand chunk is coalesced.
7598 //
7599 // When making a decision about whether to split a chunk, the desired count
7600 // vs. the current count of the candidate to be split is also considered.
7601 // If the candidate is underpopulated (currently fewer chunks than desired)
7602 // a chunk of an overpopulated (currently more chunks than desired) size may
7603 // be chosen.  The "hint" associated with a free list, if non-null, points
7604 // to a free list which may be overpopulated.
7605 //
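     // An illustrative example (the numbers below are made up, purely to
     // show the arithmetic): suppose that, for chunks of a given size, the
     // previous sweep ended with 100 chunks on the free list, the current
     // sweep begins with 30 of them still unused, and during the interval
     // there were 10 split births and 25 split deaths. The desired count
     // at the end of this sweep would then be
     //      100 - 30 + 10 - 25 = 55.
     //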
7606 
7607 void SweepClosure::do_already_free_chunk(FreeChunk* fc) {
7608   const size_t size = fc->size();
7609   // Chunks that cannot be coalesced are not in the
7610   // free lists.
7611   if (CMSTestInFreeList && !fc->cantCoalesce()) {
7612     assert(_sp->verify_chunk_in_free_list(fc),
7613       "free chunk should be in free lists");
7614   }
7615   // a chunk that is already free, should not have been
7616   // marked in the bit map
7617   HeapWord* const addr = (HeapWord*) fc;
7618   assert(!_bitMap->isMarked(addr), "free chunk should be unmarked");
7619   // Verify that the bit map has no bits marked between
7620   // addr and purported end of this block.
7621   _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
7622 
7623   // Some chunks cannot be coalesced under any circumstances.
7624   // See the definition of cantCoalesce().
7625   if (!fc->cantCoalesce()) {
7626     // This chunk can potentially be coalesced.
7627     if (_sp->adaptive_freelists()) {
7628       // All the work is done in do_post_free_or_garbage_chunk().
7629       do_post_free_or_garbage_chunk(fc, size);
7630     } else {  // Not adaptive free lists
7631       // this is a free chunk that can potentially be coalesced by the sweeper;
7632       if (!inFreeRange()) {
7633         // if the next chunk is a free block that can't be coalesced
7634         // it doesn't make sense to remove this chunk from the free lists
7635         FreeChunk* nextChunk = (FreeChunk*)(addr + size);
7636         assert((HeapWord*)nextChunk <= _sp->end(), "Chunk size out of bounds?");
7637         if ((HeapWord*)nextChunk < _sp->end() &&     // There is another free chunk to the right ...
7638             nextChunk->is_free()               &&     // ... which is free...
7639             nextChunk->cantCoalesce()) {             // ... but can't be coalesced
7640           // nothing to do
7641         } else {
7642           // Potentially the start of a new free range:
7643           // Don't eagerly remove it from the free lists.
7644           // No need to remove it if it will just be put
7645           // back again.  (Also from a pragmatic point of view
7646           // if it is a free block in a region that is beyond
7647           // any allocated blocks, an assertion will fail)
7648           // Remember the start of a free run.
7649           initialize_free_range(addr, true);
7650           // end - can coalesce with next chunk
7651         }
7652       } else {
7653         // the midst of a free range, we are coalescing
7654         print_free_block_coalesced(fc);
7655         if (CMSTraceSweeper) {
7656           gclog_or_tty->print("  -- pick up free block " PTR_FORMAT " (" SIZE_FORMAT ")\n", fc, size);
7657         }
7658         // remove it from the free lists
7659         _sp->removeFreeChunkFromFreeLists(fc);
7660         set_lastFreeRangeCoalesced(true);
7661         // If the chunk is being coalesced and the current free range is
7662         // in the free lists, remove the current free range so that it
7663         // will be returned to the free lists in its entirety - all
7664         // the coalesced pieces included.
7665         if (freeRangeInFreeLists()) {
7666           FreeChunk* ffc = (FreeChunk*) freeFinger();
7667           assert(ffc->size() == pointer_delta(addr, freeFinger()),
7668             "Size of free range is inconsistent with chunk size.");
7669           if (CMSTestInFreeList) {
7670             assert(_sp->verify_chunk_in_free_list(ffc),
7671               "free range is not in free lists");
7672           }
7673           _sp->removeFreeChunkFromFreeLists(ffc);
7674           set_freeRangeInFreeLists(false);
7675         }
7676       }
7677     }
7678     // Note that if the chunk is not coalescable (the else arm
7679     // below), we unconditionally flush, without needing to do
7680     // a "lookahead," as we do below.
7681     if (inFreeRange()) lookahead_and_flush(fc, size);
7682   } else {
7683     // Code path common to both original and adaptive free lists.
7684 
7685     // can't coalesce with previous block; this should be treated
7686     // as the end of a free run if any
7687     if (inFreeRange()) {
7688       // we kicked some butt; time to pick up the garbage
7689       assert(freeFinger() < addr, "freeFinger points too high");
7690       flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
7691     }
7692     // else, nothing to do, just continue
7693   }
7694 }
7695 
7696 size_t SweepClosure::do_garbage_chunk(FreeChunk* fc) {
7697   // This is a chunk of garbage.  It is not in any free list.
7698   // Add it to a free list or let it possibly be coalesced into
7699   // a larger chunk.
7700   HeapWord* const addr = (HeapWord*) fc;
7701   const size_t size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
7702 
7703   if (_sp->adaptive_freelists()) {
7704     // Verify that the bit map has no bits marked between
7705     // addr and purported end of just dead object.
7706     _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
7707 
7708     do_post_free_or_garbage_chunk(fc, size);
7709   } else {
7710     if (!inFreeRange()) {
7711       // start of a new free range
7712       assert(size > 0, "A free range should have a size");
7713       initialize_free_range(addr, false);
7714     } else {
7715       // this will be swept up when we hit the end of the
7716       // free range
7717       if (CMSTraceSweeper) {
7718         gclog_or_tty->print("  -- pick up garbage " PTR_FORMAT " (" SIZE_FORMAT ")\n", fc, size);
7719       }
7720       // If the chunk is being coalesced and the current free range is
7721       // in the free lists, remove the current free range so that it
7722       // will be returned to the free lists in its entirety - all
7723       // the coalesced pieces included.
7724       if (freeRangeInFreeLists()) {
7725         FreeChunk* ffc = (FreeChunk*)freeFinger();
7726         assert(ffc->size() == pointer_delta(addr, freeFinger()),
7727           "Size of free range is inconsistent with chunk size.");
7728         if (CMSTestInFreeList) {
7729           assert(_sp->verify_chunk_in_free_list(ffc),
7730             "free range is not in free lists");
7731         }
7732         _sp->removeFreeChunkFromFreeLists(ffc);
7733         set_freeRangeInFreeLists(false);
7734       }
7735       set_lastFreeRangeCoalesced(true);
7736     }
7737     // this will be swept up when we hit the end of the free range
7738 
7739     // Verify that the bit map has no bits marked between
7740     // addr and purported end of just dead object.
7741     _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
7742   }
7743   assert(_limit >= addr + size,
7744          "A fresh garbage chunk can't possibly straddle over _limit");
7745   if (inFreeRange()) lookahead_and_flush(fc, size);
7746   return size;
7747 }
7748 
7749 size_t SweepClosure::do_live_chunk(FreeChunk* fc) {
7750   HeapWord* addr = (HeapWord*) fc;
7751   // The sweeper has just found a live object. Return any accumulated
7752   // left hand chunk to the free lists.
7753   if (inFreeRange()) {
7754     assert(freeFinger() < addr, "freeFinger points too high");
7755     flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
7756   }
7757 
7758   // This object is live: we'd normally expect this to be
7759   // an oop, and would like to assert the following:
7760   // assert(oop(addr)->is_oop(), "live block should be an oop");
7761   // However, as we commented above, this may be an object whose
7762   // header hasn't yet been initialized.
7763   size_t size;
7764   assert(_bitMap->isMarked(addr), "Tautology for this control point");
7765   if (_bitMap->isMarked(addr + 1)) {
7766     // Determine the size from the bit map, rather than trying to
7767     // compute it from the object header.
7768     HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
7769     size = pointer_delta(nextOneAddr + 1, addr);
7770     assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
7771            "alignment problem");
7772 
7773 #ifdef ASSERT
7774     if (oop(addr)->klass_or_null() != NULL) {
7775       // Ignore mark word because we are running concurrent with mutators
7776       assert(oop(addr)->is_oop(true), "live block should be an oop");
7777       assert(size ==
7778              CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size()),
7779              "P-mark and computed size do not agree");
7780     }
7781 #endif
7782 
7783   } else {
7784     // This should be an initialized object that's alive.
7785     assert(oop(addr)->klass_or_null() != NULL,
7786            "Should be an initialized object");
7787     // Ignore mark word because we are running concurrent with mutators
7788     assert(oop(addr)->is_oop(true), "live block should be an oop");
7789     // Verify that the bit map has no bits marked between
7790     // addr and purported end of this block.
7791     size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
7792     assert(size >= 3, "Necessary for Printezis marks to work");
7793     assert(!_bitMap->isMarked(addr+1), "Tautology for this control point");
7794     DEBUG_ONLY(_bitMap->verifyNoOneBitsInRange(addr+2, addr+size);)
7795   }
7796   return size;
7797 }
7798 
7799 void SweepClosure::do_post_free_or_garbage_chunk(FreeChunk* fc,
7800                                                  size_t chunkSize) {
7801   // do_post_free_or_garbage_chunk() should only be called in the case
7802   // of the adaptive free list allocator.
7803   const bool fcInFreeLists = fc->is_free();
7804   assert(_sp->adaptive_freelists(), "Should only be used in this case.");
7805   assert((HeapWord*)fc <= _limit, "sweep invariant");
7806   if (CMSTestInFreeList && fcInFreeLists) {
7807     assert(_sp->verify_chunk_in_free_list(fc), "free chunk is not in free lists");
7808   }
7809 
7810   if (CMSTraceSweeper) {
7811     gclog_or_tty->print_cr("  -- pick up another chunk at " PTR_FORMAT " (" SIZE_FORMAT ")", fc, chunkSize);
7812   }
7813 
7814   HeapWord* const fc_addr = (HeapWord*) fc;
7815 
7816   bool coalesce;
7817   const size_t left  = pointer_delta(fc_addr, freeFinger());
7818   const size_t right = chunkSize;
7819   switch (FLSCoalescePolicy) {
7820     // numeric value forms a coalescing aggressiveness metric
7821     case 0:  { // never coalesce
7822       coalesce = false;
7823       break;
7824     }
7825     case 1: { // coalesce if left & right chunks on overpopulated lists
7826       coalesce = _sp->coalOverPopulated(left) &&
7827                  _sp->coalOverPopulated(right);
7828       break;
7829     }
7830     case 2: { // coalesce if left chunk on overpopulated list (default)
7831       coalesce = _sp->coalOverPopulated(left);
7832       break;
7833     }
7834     case 3: { // coalesce if left OR right chunk on overpopulated list
7835       coalesce = _sp->coalOverPopulated(left) ||
7836                  _sp->coalOverPopulated(right);
7837       break;
7838     }
7839     case 4: { // always coalesce
7840       coalesce = true;
7841       break;
7842     }
7843     default:
7844      ShouldNotReachHere();
7845   }
7846 
7847   // Should the current free range be coalesced?
7848   // If the chunk is in a free range and either we decided to coalesce above
7849   // or the chunk is near the large block at the end of the heap
7850   // (isNearLargestChunk() returns true), then coalesce this chunk.
7851   const bool doCoalesce = inFreeRange()
7852                           && (coalesce || _g->isNearLargestChunk(fc_addr));
7853   if (doCoalesce) {
7854     // Coalesce the current free range on the left with the new
7855     // chunk on the right.  If either is on a free list,
7856     // it must be removed from the list and stashed in the closure.
7857     if (freeRangeInFreeLists()) {
7858       FreeChunk* const ffc = (FreeChunk*)freeFinger();
7859       assert(ffc->size() == pointer_delta(fc_addr, freeFinger()),
7860         "Size of free range is inconsistent with chunk size.");
7861       if (CMSTestInFreeList) {
7862         assert(_sp->verify_chunk_in_free_list(ffc),
7863           "Chunk is not in free lists");
7864       }
7865       _sp->coalDeath(ffc->size());
7866       _sp->removeFreeChunkFromFreeLists(ffc);
7867       set_freeRangeInFreeLists(false);
7868     }
7869     if (fcInFreeLists) {
7870       _sp->coalDeath(chunkSize);
7871       assert(fc->size() == chunkSize,
7872         "The chunk has the wrong size or is not in the free lists");
7873       _sp->removeFreeChunkFromFreeLists(fc);
7874     }
7875     set_lastFreeRangeCoalesced(true);
7876     print_free_block_coalesced(fc);
7877   } else {  // not in a free range and/or should not coalesce
7878     // Return the current free range and start a new one.
7879     if (inFreeRange()) {
7880       // In a free range but cannot coalesce with the right hand chunk.
7881       // Put the current free range into the free lists.
7882       flush_cur_free_chunk(freeFinger(),
7883                            pointer_delta(fc_addr, freeFinger()));
7884     }
7885     // Set up for new free range.  Pass along whether the right hand
7886     // chunk is in the free lists.
7887     initialize_free_range((HeapWord*)fc, fcInFreeLists);
7888   }
7889 }
7890 
7891 // Lookahead flush:
7892 // If we are tracking a free range, and this is the last chunk that
7893 // we'll look at because its end crosses past _limit, we'll preemptively
7894 // flush it along with any free range we may be holding on to. Note that
7895 // this can be the case only for an already free or freshly garbage
7896 // chunk. If this block is an object, it can never straddle
7897 // over _limit. The "straddling" occurs when _limit is set at
7898 // the previous end of the space when this cycle started, and
7899 // a subsequent heap expansion caused the previously co-terminal
7900 // free block to be coalesced with the newly expanded portion,
7901 // thus rendering _limit a non-block-boundary making it dangerous
7902 // for the sweeper to step over and examine.
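     // An illustrative example (the addresses below are made up): suppose
     // _limit was set to the old end of the space, 0x1000, with a free block
     // [0xF00, 0x1000) co-terminal with it. If the space is then expanded to
     // 0x2000 and that free block is coalesced with the newly added storage
     // into [0xF00, 0x2000), the sweeper would otherwise walk from 0xF00
     // straight past _limit into the middle of a block; instead, this method
     // flushes the tracked free range as soon as a chunk's end reaches or
     // crosses _limit.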
7903 void SweepClosure::lookahead_and_flush(FreeChunk* fc, size_t chunk_size) {
7904   assert(inFreeRange(), "Should only be called if currently in a free range.");
7905   HeapWord* const eob = ((HeapWord*)fc) + chunk_size;
7906   assert(_sp->used_region().contains(eob - 1),
7907          err_msg("eob = " PTR_FORMAT " eob-1 = " PTR_FORMAT " _limit = " PTR_FORMAT
7908                  " out of bounds wrt _sp = [" PTR_FORMAT "," PTR_FORMAT ")"
7909                  " when examining fc = " PTR_FORMAT "(" SIZE_FORMAT ")",
7910                  eob, eob-1, _limit, _sp->bottom(), _sp->end(), fc, chunk_size));
7911   if (eob >= _limit) {
7912     assert(eob == _limit || fc->is_free(), "Only a free chunk should allow us to cross over the limit");
7913     if (CMSTraceSweeper) {
7914       gclog_or_tty->print_cr("_limit " PTR_FORMAT " reached or crossed by block "
7915                              "[" PTR_FORMAT "," PTR_FORMAT ") in space "
7916                              "[" PTR_FORMAT "," PTR_FORMAT ")",
7917                              _limit, fc, eob, _sp->bottom(), _sp->end());
7918     }
7919     // Return the storage we are tracking back into the free lists.
7920     if (CMSTraceSweeper) {
7921       gclog_or_tty->print_cr("Flushing ... ");
7922     }
7923     assert(freeFinger() < eob, "Error");
7924     flush_cur_free_chunk(freeFinger(), pointer_delta(eob, freeFinger()));
7925   }
7926 }
7927 
7928 void SweepClosure::flush_cur_free_chunk(HeapWord* chunk, size_t size) {
7929   assert(inFreeRange(), "Should only be called if currently in a free range.");
7930   assert(size > 0,
7931     "A zero sized chunk cannot be added to the free lists.");
7932   if (!freeRangeInFreeLists()) {
7933     if (CMSTestInFreeList) {
7934       FreeChunk* fc = (FreeChunk*) chunk;
7935       fc->set_size(size);
7936       assert(!_sp->verify_chunk_in_free_list(fc),
7937         "chunk should not be in free lists yet");
7938     }
7939     if (CMSTraceSweeper) {
7940       gclog_or_tty->print_cr(" -- add free block " PTR_FORMAT " (" SIZE_FORMAT ") to free lists",
7941                     chunk, size);
7942     }
7943     // A new free range is going to be starting.  The current
7944     // free range has not been added to the free lists yet or
7945     // was removed so add it back.
7946     // If the current free range was coalesced, then the death
7947     // of the free range was recorded.  Record a birth now.
7948     if (lastFreeRangeCoalesced()) {
7949       _sp->coalBirth(size);
7950     }
7951     _sp->addChunkAndRepairOffsetTable(chunk, size,
7952             lastFreeRangeCoalesced());
7953   } else if (CMSTraceSweeper) {
7954     gclog_or_tty->print_cr("Already in free list: nothing to flush");
7955   }
7956   set_inFreeRange(false);
7957   set_freeRangeInFreeLists(false);
7958 }
7959 
7960 // We take a break if we've been at this for a while,
7961 // so as to avoid monopolizing the locks involved.
7962 void SweepClosure::do_yield_work(HeapWord* addr) {
7963   // Return current free chunk being used for coalescing (if any)
7964   // to the appropriate freelist.  After yielding, the next
7965   // free block encountered will start a coalescing range of
7966   // free blocks.  If the next free block is adjacent to the
7967   // chunk just flushed, they will need to wait for the next
7968   // sweep to be coalesced.
7969   if (inFreeRange()) {
7970     flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
7971   }
7972 
7973   // First give up the locks, then yield, then re-lock.
7974   // We should probably use a constructor/destructor idiom to
7975   // do this unlock/lock or modify the MutexUnlocker class to
7976   // serve our purpose. XXX
7977   assert_lock_strong(_bitMap->lock());
7978   assert_lock_strong(_freelistLock);
7979   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
7980          "CMS thread should hold CMS token");
7981   _bitMap->lock()->unlock();
7982   _freelistLock->unlock();
7983   ConcurrentMarkSweepThread::desynchronize(true);
7984   _collector->stopTimer();
7985   if (PrintCMSStatistics != 0) {
7986     _collector->incrementYields();
7987   }
7988 
7989   // See the comment in coordinator_yield()
7990   for (unsigned i = 0; i < CMSYieldSleepCount &&
7991                        ConcurrentMarkSweepThread::should_yield() &&
7992                        !CMSCollector::foregroundGCIsActive(); ++i) {
7993     os::sleep(Thread::current(), 1, false);
7994   }
7995 
7996   ConcurrentMarkSweepThread::synchronize(true);
7997   _freelistLock->lock();
7998   _bitMap->lock()->lock_without_safepoint_check();
7999   _collector->startTimer();
8000 }
8001 
8002 #ifndef PRODUCT
8003 // This is actually very useful in a product build if it can
8004 // be called from the debugger.  Compile it into the product
8005 // as needed.
8006 bool debug_verify_chunk_in_free_list(FreeChunk* fc) {
8007   return debug_cms_space->verify_chunk_in_free_list(fc);
8008 }
8009 #endif
8010 
8011 void SweepClosure::print_free_block_coalesced(FreeChunk* fc) const {
8012   if (CMSTraceSweeper) {
8013     gclog_or_tty->print_cr("Sweep:coal_free_blk " PTR_FORMAT " (" SIZE_FORMAT ")",
8014                            fc, fc->size());
8015   }
8016 }
8017 
8018 // CMSIsAliveClosure
8019 bool CMSIsAliveClosure::do_object_b(oop obj) {
8020   HeapWord* addr = (HeapWord*)obj;
8021   return addr != NULL &&
8022          (!_span.contains(addr) || _bit_map->isMarked(addr));
8023 }
8024 
8025 
8026 CMSKeepAliveClosure::CMSKeepAliveClosure( CMSCollector* collector,
8027                       MemRegion span,
8028                       CMSBitMap* bit_map, CMSMarkStack* mark_stack,
8029                       bool cpc):
8030   _collector(collector),
8031   _span(span),
8032   _bit_map(bit_map),
8033   _mark_stack(mark_stack),
8034   _concurrent_precleaning(cpc) {
8035   assert(!_span.is_empty(), "Empty span could spell trouble");
8036 }
8037 
8038 
8039 // CMSKeepAliveClosure: the serial version
8040 void CMSKeepAliveClosure::do_oop(oop obj) {
8041   HeapWord* addr = (HeapWord*)obj;
8042   if (_span.contains(addr) &&
8043       !_bit_map->isMarked(addr)) {
8044     _bit_map->mark(addr);
8045     bool simulate_overflow = false;
8046     NOT_PRODUCT(
8047       if (CMSMarkStackOverflowALot &&
8048           _collector->simulate_overflow()) {
8049         // simulate a stack overflow
8050         simulate_overflow = true;
8051       }
8052     )
8053     if (simulate_overflow || !_mark_stack->push(obj)) {
8054       if (_concurrent_precleaning) {
8055         // We dirty the overflown object and let the remark
8056         // phase deal with it.
8057         assert(_collector->overflow_list_is_empty(), "Error");
8058         // In the case of object arrays, we need to dirty all of
8059         // the cards that the object spans. No locking or atomics
8060         // are needed since no one else can be mutating the mod union
8061         // table.
8062         if (obj->is_objArray()) {
8063           size_t sz = obj->size();
8064           HeapWord* end_card_addr =
8065             (HeapWord*)round_to((intptr_t)(addr+sz), CardTableModRefBS::card_size);
8066           MemRegion redirty_range = MemRegion(addr, end_card_addr);
8067           assert(!redirty_range.is_empty(), "Arithmetical tautology");
8068           _collector->_modUnionTable.mark_range(redirty_range);
8069         } else {
8070           _collector->_modUnionTable.mark(addr);
8071         }
8072         _collector->_ser_kac_preclean_ovflw++;
8073       } else {
8074         _collector->push_on_overflow_list(obj);
8075         _collector->_ser_kac_ovflw++;
8076       }
8077     }
8078   }
8079 }
8080 
8081 void CMSKeepAliveClosure::do_oop(oop* p)       { CMSKeepAliveClosure::do_oop_work(p); }
8082 void CMSKeepAliveClosure::do_oop(narrowOop* p) { CMSKeepAliveClosure::do_oop_work(p); }
8083 
8084 // CMSParKeepAliveClosure: a parallel version of the above.
8085 // The work queues are private to each closure (thread),
8086 // but (may be) available for stealing by other threads.
8087 void CMSParKeepAliveClosure::do_oop(oop obj) {
8088   HeapWord* addr = (HeapWord*)obj;
8089   if (_span.contains(addr) &&
8090       !_bit_map->isMarked(addr)) {
8091     // In general, during recursive tracing, several threads
8092     // may be concurrently getting here; the first one to
8093     // "tag" it, claims it.
8094     if (_bit_map->par_mark(addr)) {
8095       bool res = _work_queue->push(obj);
8096       assert(res, "Low water mark should be much less than capacity");
8097       // Do a recursive trim in the hope that this will keep
8098       // stack usage lower, but leave some oops for potential stealers
8099       trim_queue(_low_water_mark);
8100     } // Else, another thread got there first
8101   }
8102 }
8103 
8104 void CMSParKeepAliveClosure::do_oop(oop* p)       { CMSParKeepAliveClosure::do_oop_work(p); }
8105 void CMSParKeepAliveClosure::do_oop(narrowOop* p) { CMSParKeepAliveClosure::do_oop_work(p); }
8106 
8107 void CMSParKeepAliveClosure::trim_queue(uint max) {
8108   while (_work_queue->size() > max) {
8109     oop new_oop;
8110     if (_work_queue->pop_local(new_oop)) {
8111       assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
8112       assert(_bit_map->isMarked((HeapWord*)new_oop),
8113              "no white objects on this stack!");
8114       assert(_span.contains((HeapWord*)new_oop), "Out of bounds oop");
8115       // iterate over the oops in this oop, marking and pushing
8116       // the ones in CMS heap (i.e. in _span).
8117       new_oop->oop_iterate(&_mark_and_push);
8118     }
8119   }
8120 }
8121 
8122 CMSInnerParMarkAndPushClosure::CMSInnerParMarkAndPushClosure(
8123                                 CMSCollector* collector,
8124                                 MemRegion span, CMSBitMap* bit_map,
8125                                 OopTaskQueue* work_queue):
8126   _collector(collector),
8127   _span(span),
8128   _bit_map(bit_map),
8129   _work_queue(work_queue) { }
8130 
8131 void CMSInnerParMarkAndPushClosure::do_oop(oop obj) {
8132   HeapWord* addr = (HeapWord*)obj;
8133   if (_span.contains(addr) &&
8134       !_bit_map->isMarked(addr)) {
8135     if (_bit_map->par_mark(addr)) {
8136       bool simulate_overflow = false;
8137       NOT_PRODUCT(
8138         if (CMSMarkStackOverflowALot &&
8139             _collector->par_simulate_overflow()) {
8140           // simulate a stack overflow
8141           simulate_overflow = true;
8142         }
8143       )
8144       if (simulate_overflow || !_work_queue->push(obj)) {
8145         _collector->par_push_on_overflow_list(obj);
8146         _collector->_par_kac_ovflw++;
8147       }
8148     } // Else another thread got there already
8149   }
8150 }
8151 
8152 void CMSInnerParMarkAndPushClosure::do_oop(oop* p)       { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
8153 void CMSInnerParMarkAndPushClosure::do_oop(narrowOop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
8154 
8155 //////////////////////////////////////////////////////////////////
8156 //  CMSExpansionCause                /////////////////////////////
8157 //////////////////////////////////////////////////////////////////
8158 const char* CMSExpansionCause::to_string(CMSExpansionCause::Cause cause) {
8159   switch (cause) {
8160     case _no_expansion:
8161       return "No expansion";
8162     case _satisfy_free_ratio:
8163       return "Free ratio";
8164     case _satisfy_promotion:
8165       return "Satisfy promotion";
8166     case _satisfy_allocation:
8167       return "allocation";
8168     case _allocate_par_lab:
8169       return "Par LAB";
8170     case _allocate_par_spooling_space:
8171       return "Par Spooling Space";
8172     case _adaptive_size_policy:
8173       return "Ergonomics";
8174     default:
8175       return "unknown";
8176   }
8177 }
8178 
8179 void CMSDrainMarkingStackClosure::do_void() {
8180   // the max number to take from overflow list at a time
8181   const size_t num = _mark_stack->capacity()/4;
8182   assert(!_concurrent_precleaning || _collector->overflow_list_is_empty(),
8183          "Overflow list should be NULL during concurrent phases");
8184   while (!_mark_stack->isEmpty() ||
8185          // if stack is empty, check the overflow list
8186          _collector->take_from_overflow_list(num, _mark_stack)) {
8187     oop obj = _mark_stack->pop();
8188     HeapWord* addr = (HeapWord*)obj;
8189     assert(_span.contains(addr), "Should be within span");
8190     assert(_bit_map->isMarked(addr), "Should be marked");
8191     assert(obj->is_oop(), "Should be an oop");
8192     obj->oop_iterate(_keep_alive);
8193   }
8194 }
8195 
8196 void CMSParDrainMarkingStackClosure::do_void() {
8197   // drain queue
8198   trim_queue(0);
8199 }
8200 
8201 // Trim our work_queue so its length is below max at return
8202 void CMSParDrainMarkingStackClosure::trim_queue(uint max) {
8203   while (_work_queue->size() > max) {
8204     oop new_oop;
8205     if (_work_queue->pop_local(new_oop)) {
8206       assert(new_oop->is_oop(), "Expected an oop");
8207       assert(_bit_map->isMarked((HeapWord*)new_oop),
8208              "no white objects on this stack!");
8209       assert(_span.contains((HeapWord*)new_oop), "Out of bounds oop");
8210       // iterate over the oops in this oop, marking and pushing
8211       // the ones in CMS heap (i.e. in _span).
8212       new_oop->oop_iterate(&_mark_and_push);
8213     }
8214   }
8215 }
8216 
8217 ////////////////////////////////////////////////////////////////////
8218 // Support for Marking Stack Overflow list handling and related code
8219 ////////////////////////////////////////////////////////////////////
8220 // Much of the following code is similar in shape and spirit to the
8221 // code used in ParNewGC. We should try to share that code
8222 // as much as possible in the future.
8223 
8224 #ifndef PRODUCT
8225 // Debugging support for CMSStackOverflowALot
8226 
8227 // It's OK to call this multi-threaded;  the worst thing
8228 // that can happen is that we'll get a bunch of closely
8229 // spaced simulated overflows, but that's OK, in fact
8230 // probably good as it would exercise the overflow code
8231 // under contention.
8232 bool CMSCollector::simulate_overflow() {
8233   if (_overflow_counter-- <= 0) { // just being defensive
8234     _overflow_counter = CMSMarkStackOverflowInterval;
8235     return true;
8236   } else {
8237     return false;
8238   }
8239 }
8240 
8241 bool CMSCollector::par_simulate_overflow() {
8242   return simulate_overflow();
8243 }
8244 #endif
8245 
8246 // Single-threaded
8247 bool CMSCollector::take_from_overflow_list(size_t num, CMSMarkStack* stack) {
8248   assert(stack->isEmpty(), "Expected precondition");
8249   assert(stack->capacity() > num, "Shouldn't bite more than can chew");
8250   size_t i = num;
8251   oop  cur = _overflow_list;
8252   const markOop proto = markOopDesc::prototype();
8253   NOT_PRODUCT(ssize_t n = 0;)
8254   for (oop next; i > 0 && cur != NULL; cur = next, i--) {
8255     next = oop(cur->mark());
8256     cur->set_mark(proto);   // until proven otherwise
8257     assert(cur->is_oop(), "Should be an oop");
8258     bool res = stack->push(cur);
8259     assert(res, "Bit off more than can chew?");
8260     NOT_PRODUCT(n++;)
8261   }
8262   _overflow_list = cur;
8263 #ifndef PRODUCT
8264   assert(_num_par_pushes >= n, "Too many pops?");
8265   _num_par_pushes -= n;
8266 #endif
8267   return !stack->isEmpty();
8268 }
8269 
8270 #define BUSY  (cast_to_oop<intptr_t>(0x1aff1aff))
8271 // (MT-safe) Get a prefix of at most "num" from the list.
8272 // The overflow list is chained through the mark word of
8273 // each object in the list. We fetch the entire list,
8274 // break off a prefix of the right size and return the
8275 // remainder. If other threads try to take objects from
8276 // the overflow list at that time, they will wait for
8277 // some time to see if data becomes available. If (and
8278 // only if) another thread places one or more object(s)
8279 // on the global list before we have returned the suffix
8280 // to the global list, we will walk down our local list
8281 // to find its end and append the global list to
8282 // our suffix before returning it. This suffix walk can
8283 // prove to be expensive (quadratic in the amount of traffic)
8284 // when there are many objects in the overflow list and
8285 // there is much producer-consumer contention on the list.
8286 // *NOTE*: The overflow list manipulation code here and
8287 // in ParNewGeneration:: are very similar in shape,
8288 // except that in the ParNew case we use the old (from/eden)
8289 // copy of the object to thread the list via its klass word.
8290 // Because of the common code, if you make any changes in
8291 // the code below, please check the ParNew version to see if
8292 // similar changes might be needed.
8293 // CR 6797058 has been filed to consolidate the common code.
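     // An illustrative walk-through (the object names A..E are hypothetical):
     // with the global list
     //      _overflow_list -> A -> B -> C -> D -> NULL
     // (threaded through the objects' mark words) and num == 2, this thread
     // swaps BUSY into _overflow_list, keeps the prefix A -> B for its own
     // work queue, and tries to CAS the suffix C -> D back as the new global
     // list. If some other thread managed to push an object E while the list
     // was BUSY, the CAS fails and the suffix is spliced in front of it, so
     // the global list becomes C -> D -> E.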
8294 bool CMSCollector::par_take_from_overflow_list(size_t num,
8295                                                OopTaskQueue* work_q,
8296                                                int no_of_gc_threads) {
8297   assert(work_q->size() == 0, "First empty local work queue");
8298   assert(num < work_q->max_elems(), "Can't bite more than we can chew");
8299   if (_overflow_list == NULL) {
8300     return false;
8301   }
8302   // Grab the entire list; we'll put back a suffix
8303   oop prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
8304   Thread* tid = Thread::current();
8305   // Before "no_of_gc_threads" was introduced CMSOverflowSpinCount was
8306   // set to ParallelGCThreads.
8307   size_t CMSOverflowSpinCount = (size_t) no_of_gc_threads; // was ParallelGCThreads;
8308   size_t sleep_time_millis = MAX2((size_t)1, num/100);
8309   // If the list is busy, we spin for a short while,
8310   // sleeping between attempts to get the list.
8311   for (size_t spin = 0; prefix == BUSY && spin < CMSOverflowSpinCount; spin++) {
8312     os::sleep(tid, sleep_time_millis, false);
8313     if (_overflow_list == NULL) {
8314       // Nothing left to take
8315       return false;
8316     } else if (_overflow_list != BUSY) {
8317       // Try and grab the prefix
8318       prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
8319     }
8320   }
8321   // If the list was found to be empty, or we spun long
8322   // enough, we give up and return empty-handed. If we leave
8323   // the list in the BUSY state below, it must be the case that
8324   // some other thread holds the overflow list and will set it
8325   // to a non-BUSY state in the future.
8326   if (prefix == NULL || prefix == BUSY) {
8327      // Nothing to take or waited long enough
8328      if (prefix == NULL) {
8329        // Write back the NULL in case we overwrote it with BUSY above
8330        // and it is still the same value.
8331        (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
8332      }
8333      return false;
8334   }
8335   assert(prefix != NULL && prefix != BUSY, "Error");
8336   size_t i = num;
8337   oop cur = prefix;
8338   // Walk down the first "num" objects, unless we reach the end.
8339   for (; i > 1 && cur->mark() != NULL; cur = oop(cur->mark()), i--);
8340   if (cur->mark() == NULL) {
8341     // We have "num" or fewer elements in the list, so there
8342     // is nothing to return to the global list.
8343     // Write back the NULL in lieu of the BUSY we wrote
8344     // above, if it is still the same value.
8345     if (_overflow_list == BUSY) {
8346       (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
8347     }
8348   } else {
8349     // Chop off the suffix and return it to the global list.
8350     assert(cur->mark() != BUSY, "Error");
8351     oop suffix_head = cur->mark(); // suffix will be put back on global list
8352     cur->set_mark(NULL);           // break off suffix
8353     // It's possible that the list is still in the empty(busy) state
8354     // we left it in a short while ago; in that case we may be
8355     // able to place back the suffix without incurring the cost
8356     // of a walk down the list.
8357     oop observed_overflow_list = _overflow_list;
8358     oop cur_overflow_list = observed_overflow_list;
8359     bool attached = false;
8360     while (observed_overflow_list == BUSY || observed_overflow_list == NULL) {
8361       observed_overflow_list =
8362         (oop) Atomic::cmpxchg_ptr(suffix_head, &_overflow_list, cur_overflow_list);
8363       if (cur_overflow_list == observed_overflow_list) {
8364         attached = true;
8365         break;
8366       } else cur_overflow_list = observed_overflow_list;
8367     }
8368     if (!attached) {
8369       // Too bad, someone else sneaked in (at least) an element; we'll need
8370       // to do a splice. Find tail of suffix so we can prepend suffix to global
8371       // list.
8372       for (cur = suffix_head; cur->mark() != NULL; cur = (oop)(cur->mark()));
8373       oop suffix_tail = cur;
8374       assert(suffix_tail != NULL && suffix_tail->mark() == NULL,
8375              "Tautology");
8376       observed_overflow_list = _overflow_list;
8377       do {
8378         cur_overflow_list = observed_overflow_list;
8379         if (cur_overflow_list != BUSY) {
8380           // Do the splice ...
8381           suffix_tail->set_mark(markOop(cur_overflow_list));
8382         } else { // cur_overflow_list == BUSY
8383           suffix_tail->set_mark(NULL);
8384         }
8385         // ... and try to place spliced list back on overflow_list ...
8386         observed_overflow_list =
8387           (oop) Atomic::cmpxchg_ptr(suffix_head, &_overflow_list, cur_overflow_list);
8388       } while (cur_overflow_list != observed_overflow_list);
8389       // ... until we have succeeded in doing so.
8390     }
8391   }
8392 
8393   // Push the prefix elements on work_q
8394   assert(prefix != NULL, "control point invariant");
8395   const markOop proto = markOopDesc::prototype();
8396   oop next;
8397   NOT_PRODUCT(ssize_t n = 0;)
8398   for (cur = prefix; cur != NULL; cur = next) {
8399     next = oop(cur->mark());
8400     cur->set_mark(proto);   // until proven otherwise
8401     assert(cur->is_oop(), "Should be an oop");
8402     bool res = work_q->push(cur);
8403     assert(res, "Bit off more than we can chew?");
8404     NOT_PRODUCT(n++;)
8405   }
8406 #ifndef PRODUCT
8407   assert(_num_par_pushes >= n, "Too many pops?");
8408   Atomic::add_ptr(-(intptr_t)n, &_num_par_pushes);
8409 #endif
8410   return true;
8411 }
8412 
8413 // Single-threaded
8414 void CMSCollector::push_on_overflow_list(oop p) {
8415   NOT_PRODUCT(_num_par_pushes++;)
8416   assert(p->is_oop(), "Not an oop");
8417   preserve_mark_if_necessary(p);
8418   p->set_mark((markOop)_overflow_list);
8419   _overflow_list = p;
8420 }
8421 
8422 // Multi-threaded; use CAS to prepend to overflow list
8423 void CMSCollector::par_push_on_overflow_list(oop p) {
8424   NOT_PRODUCT(Atomic::inc_ptr(&_num_par_pushes);)
8425   assert(p->is_oop(), "Not an oop");
8426   par_preserve_mark_if_necessary(p);
8427   oop observed_overflow_list = _overflow_list;
8428   oop cur_overflow_list;
8429   do {
8430     cur_overflow_list = observed_overflow_list;
8431     if (cur_overflow_list != BUSY) {
8432       p->set_mark(markOop(cur_overflow_list));
8433     } else {
8434       p->set_mark(NULL);
8435     }
8436     observed_overflow_list =
8437       (oop) Atomic::cmpxchg_ptr(p, &_overflow_list, cur_overflow_list);
8438   } while (cur_overflow_list != observed_overflow_list);
8439 }
8440 #undef BUSY
8441 
8442 // Single threaded
8443 // General Note on GrowableArray: pushes may silently fail
8444 // because we are (temporarily) out of C-heap for expanding
8445 // the stack. The problem is quite ubiquitous and affects
8446 // a lot of code in the JVM. The prudent thing for GrowableArray
8447 // to do (for now) is to exit with an error. However, that may
8448 // be too draconian in some cases because the caller may be
8449 // able to recover without much harm. For such cases, we
8450 // should probably introduce a "soft_push" method which returns
8451 // an indication of success or failure with the assumption that
8452 // the caller may be able to recover from a failure; code in
8453 // the VM can then be changed, incrementally, to deal with such
8454 // failures where possible, thus, incrementally hardening the VM
8455 // in such low resource situations.
8456 void CMSCollector::preserve_mark_work(oop p, markOop m) {
8457   _preserved_oop_stack.push(p);
8458   _preserved_mark_stack.push(m);
8459   assert(m == p->mark(), "Mark word changed");
8460   assert(_preserved_oop_stack.size() == _preserved_mark_stack.size(),
8461          "bijection");
8462 }
8463 
8464 // Single threaded
8465 void CMSCollector::preserve_mark_if_necessary(oop p) {
8466   markOop m = p->mark();
8467   if (m->must_be_preserved(p)) {
8468     preserve_mark_work(p, m);
8469   }
8470 }
8471 
8472 void CMSCollector::par_preserve_mark_if_necessary(oop p) {
8473   markOop m = p->mark();
8474   if (m->must_be_preserved(p)) {
8475     MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
8476     // Even though we read the mark word without holding
8477     // the lock, we are assured that it will not change
8478     // because we "own" this oop, so no other thread can
8479     // be trying to push it on the overflow list; see
8480     // the assertion in preserve_mark_work() that checks
8481     // that m == p->mark().
8482     preserve_mark_work(p, m);
8483   }
8484 }
8485 
8486 // We should be able to do this multi-threaded,
8487 // a chunk of stack being a task (this is
8488 // correct because each oop only ever appears
8489 // once in the overflow list). However, it's
8490 // not very easy to completely overlap this with
8491 // other operations, so will generally not be done
8492 // until all work's been completed. Because we
8493 // expect the preserved oop stack (set) to be small,
8494 // it's probably fine to do this single-threaded.
8495 // We can explore cleverer concurrent/overlapped/parallel
8496 // processing of preserved marks if we feel the
8497 // need for this in the future. Stack overflow should
8498 // be so rare in practice and, when it happens, its
8499 // effect on performance so great that this will
8500 // likely just be in the noise anyway.
8501 void CMSCollector::restore_preserved_marks_if_any() {
8502   assert(SafepointSynchronize::is_at_safepoint(),
8503          "world should be stopped");
8504   assert(Thread::current()->is_ConcurrentGC_thread() ||
8505          Thread::current()->is_VM_thread(),
8506          "should be single-threaded");
8507   assert(_preserved_oop_stack.size() == _preserved_mark_stack.size(),
8508          "bijection");
8509 
8510   while (!_preserved_oop_stack.is_empty()) {
8511     oop p = _preserved_oop_stack.pop();
8512     assert(p->is_oop(), "Should be an oop");
8513     assert(_span.contains(p), "oop should be in _span");
8514     assert(p->mark() == markOopDesc::prototype(),
8515            "Set when taken from overflow list");
8516     markOop m = _preserved_mark_stack.pop();
8517     p->set_mark(m);
8518   }
8519   assert(_preserved_mark_stack.is_empty() && _preserved_oop_stack.is_empty(),
8520          "stacks were cleared above");
8521 }
8522 
8523 #ifndef PRODUCT
8524 bool CMSCollector::no_preserved_marks() const {
8525   return _preserved_mark_stack.is_empty() && _preserved_oop_stack.is_empty();
8526 }
8527 #endif
8528 
8529 // Transfer some number of overflown objects to usual marking
8530 // stack. Return true if some objects were transferred.
8531 bool MarkRefsIntoAndScanClosure::take_from_overflow_list() {
8532   size_t num = MIN2((size_t)(_mark_stack->capacity() - _mark_stack->length())/4,
8533                     (size_t)ParGCDesiredObjsFromOverflowList);
8534 
8535   bool res = _collector->take_from_overflow_list(num, _mark_stack);
8536   assert(_collector->overflow_list_is_empty() || res,
8537          "If list is not empty, we should have taken something");
8538   assert(!res || !_mark_stack->isEmpty(),
8539          "If we took something, it should now be on our stack");
8540   return res;
8541 }
8542 
8543 size_t MarkDeadObjectsClosure::do_blk(HeapWord* addr) {
8544   size_t res = _sp->block_size_no_stall(addr, _collector);
8545   if (_sp->block_is_obj(addr)) {
8546     if (_live_bit_map->isMarked(addr)) {
8547       // It can't have been dead in a previous cycle
8548       guarantee(!_dead_bit_map->isMarked(addr), "No resurrection!");
8549     } else {
8550       _dead_bit_map->mark(addr);      // mark the dead object
8551     }
8552   }
8553   // Could be 0, if the block size could not be computed without stalling.
8554   return res;
8555 }
8556 
8557 TraceCMSMemoryManagerStats::TraceCMSMemoryManagerStats(CMSCollector::CollectorState phase, GCCause::Cause cause): TraceMemoryManagerStats() {
8558 
8559   switch (phase) {
8560     case CMSCollector::InitialMarking:
8561       initialize(true  /* fullGC */ ,
8562                  cause /* cause of the GC */,
8563                  true  /* recordGCBeginTime */,
8564                  true  /* recordPreGCUsage */,
8565                  false /* recordPeakUsage */,
8566                  false /* recordPostGCusage */,
8567                  true  /* recordAccumulatedGCTime */,
8568                  false /* recordGCEndTime */,
8569                  false /* countCollection */  );
8570       break;
8571 
8572     case CMSCollector::FinalMarking:
8573       initialize(true  /* fullGC */ ,
8574                  cause /* cause of the GC */,
8575                  false /* recordGCBeginTime */,
8576                  false /* recordPreGCUsage */,
8577                  false /* recordPeakUsage */,
8578                  false /* recordPostGCusage */,
8579                  true  /* recordAccumulatedGCTime */,
8580                  false /* recordGCEndTime */,
8581                  false /* countCollection */  );
8582       break;
8583 
8584     case CMSCollector::Sweeping:
8585       initialize(true  /* fullGC */ ,
8586                  cause /* cause of the GC */,
8587                  false /* recordGCBeginTime */,
8588                  false /* recordPreGCUsage */,
8589                  true  /* recordPeakUsage */,
8590                  true  /* recordPostGCusage */,
8591                  false /* recordAccumulatedGCTime */,
8592                  true  /* recordGCEndTime */,
8593                  true  /* countCollection */  );
8594       break;
8595 
8596     default:
8597       ShouldNotReachHere();
8598   }
8599 }