   1 /*
   2  * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/classLoaderData.hpp"
  27 #include "classfile/symbolTable.hpp"
  28 #include "classfile/systemDictionary.hpp"
  29 #include "code/codeCache.hpp"
  30 #include "gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp"
  31 #include "gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.hpp"
  32 #include "gc_implementation/concurrentMarkSweep/cmsGCAdaptivePolicyCounters.hpp"
  33 #include "gc_implementation/concurrentMarkSweep/cmsOopClosures.inline.hpp"
  34 #include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp"
  35 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp"
  36 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
  37 #include "gc_implementation/concurrentMarkSweep/vmCMSOperations.hpp"
  38 #include "gc_implementation/parNew/parNewGeneration.hpp"
  39 #include "gc_implementation/shared/collectorCounters.hpp"
  40 #include "gc_implementation/shared/gcTimer.hpp"
  41 #include "gc_implementation/shared/gcTrace.hpp"
  42 #include "gc_implementation/shared/gcTraceTime.hpp"
  43 #include "gc_implementation/shared/isGCActiveMark.hpp"
  44 #include "gc_interface/collectedHeap.inline.hpp"
  45 #include "memory/allocation.hpp"
  46 #include "memory/cardTableRS.hpp"
  47 #include "memory/collectorPolicy.hpp"
  48 #include "memory/gcLocker.inline.hpp"
  49 #include "memory/genCollectedHeap.hpp"
  50 #include "memory/genMarkSweep.hpp"
  51 #include "memory/genOopClosures.inline.hpp"
  52 #include "memory/iterator.hpp"
  53 #include "memory/padded.hpp"
  54 #include "memory/referencePolicy.hpp"
  55 #include "memory/resourceArea.hpp"
  56 #include "memory/tenuredGeneration.hpp"
  57 #include "oops/oop.inline.hpp"
  58 #include "prims/jvmtiExport.hpp"
  59 #include "runtime/globals_extension.hpp"
  60 #include "runtime/handles.inline.hpp"
  61 #include "runtime/java.hpp"
  62 #include "runtime/vmThread.hpp"
  63 #include "services/memoryService.hpp"
  64 #include "services/runtimeService.hpp"
  65 
  66 // statics
  67 CMSCollector* ConcurrentMarkSweepGeneration::_collector = NULL;
  68 bool CMSCollector::_full_gc_requested = false;
  69 GCCause::Cause CMSCollector::_full_gc_cause = GCCause::_no_gc;
  70 
  71 //////////////////////////////////////////////////////////////////
  72 // In support of CMS/VM thread synchronization
  73 //////////////////////////////////////////////////////////////////
  74 // We split use of the CGC_lock into 2 "levels".
  75 // The low-level locking is of the usual CGC_lock monitor. We introduce
  76 // a higher level "token" (hereafter "CMS token") built on top of the
  77 // low level monitor (hereafter "CGC lock").
  78 // The token-passing protocol gives priority to the VM thread. The
  79 // CMS-lock doesn't provide any fairness guarantees, but clients
  80 // should ensure that it is only held for very short, bounded
  81 // durations.
  82 //
  83 // When either of the CMS thread or the VM thread is involved in
  84 // collection operations during which it does not want the other
  85 // thread to interfere, it obtains the CMS token.
  86 //
  87 // If either thread tries to get the token while the other has
  88 // it, that thread waits. However, if the VM thread and CMS thread
  89 // both want the token, then the VM thread gets priority while the
  90 // CMS thread waits. This ensures, for instance, that the "concurrent"
  91 // phases of the CMS thread's work do not block out the VM thread
  92 // for long periods of time as the CMS thread continues to hog
  93 // the token. (See bug 4616232).
  94 //
  95 // The baton-passing functions are, however, controlled by the
  96 // flags _foregroundGCShouldWait and _foregroundGCIsActive,
  97 // and here the low-level CMS lock, not the high level token,
  98 // ensures mutual exclusion.
  99 //
 100 // Two important conditions that we have to satisfy:
 101 // 1. if a thread does a low-level wait on the CMS lock, then it
 102 //    relinquishes the CMS token if it were holding that token
 103 //    when it acquired the low-level CMS lock.
 104 // 2. any low-level notifications on the low-level lock
 105 //    should only be sent when a thread has relinquished the token.
 106 //
 107 // In the absence of either property, we'd have potential deadlock.
 108 //
 109 // We protect each of the CMS (concurrent and sequential) phases
 110 // with the CMS _token_, not the CMS _lock_.
 111 //
 112 // The only code protected by CMS lock is the token acquisition code
 113 // itself, see ConcurrentMarkSweepThread::[de]synchronize(), and the
 114 // baton-passing code.
 115 //
 116 // Unfortunately, I couldn't come up with a good abstraction to factor and
 117 // hide the naked CGC_lock manipulation in the baton-passing code
 118 // further below. That's something we should try to do. Also, the proof
 119 // of correctness of this 2-level locking scheme is far from obvious,
 120 // and potentially quite slippery. We have an uneasy suspicion, for instance,
 121 // that there may be a theoretical possibility of delay/starvation in the
 122 // low-level lock/wait/notify scheme used for the baton-passing because of
 123 // potential interference with the priority scheme embodied in the
 124 // CMS-token-passing protocol. See related comments at a CGC_lock->wait()
 125 // invocation further below and marked with "XXX 20011219YSR".
 126 // Indeed, as we note elsewhere, this may become yet more slippery
 127 // in the presence of multiple CMS and/or multiple VM threads. XXX
 128 
 129 class CMSTokenSync: public StackObj {
 130  private:
 131   bool _is_cms_thread;
 132  public:
 133   CMSTokenSync(bool is_cms_thread):
 134     _is_cms_thread(is_cms_thread) {
 135     assert(is_cms_thread == Thread::current()->is_ConcurrentGC_thread(),
 136            "Incorrect argument to constructor");
 137     ConcurrentMarkSweepThread::synchronize(_is_cms_thread);
 138   }
 139 
 140   ~CMSTokenSync() {
 141     assert(_is_cms_thread ?
 142              ConcurrentMarkSweepThread::cms_thread_has_cms_token() :
 143              ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
 144           "Incorrect state");
 145     ConcurrentMarkSweepThread::desynchronize(_is_cms_thread);
 146   }
 147 };
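     // A minimal usage sketch (hypothetical caller, not code from this file):
     //
     //   {
     //     CMSTokenSync ts(true /* is_cms_thread */);  // acquire the CMS token
     //     ... work that must not interleave with the VM thread ...
     //   }                                             // dtor releases the token
     //
     // i.e. the token is held for exactly the dynamic extent of the StackObj.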
 148 
 149 // Convenience class that does a CMSTokenSync, and then acquires
 150 // up to three locks.
 151 class CMSTokenSyncWithLocks: public CMSTokenSync {
 152  private:
 153   // Note: locks are acquired in textual declaration order
 154   // and released in the opposite order
 155   MutexLockerEx _locker1, _locker2, _locker3;
 156  public:
 157   CMSTokenSyncWithLocks(bool is_cms_thread, Mutex* mutex1,
 158                         Mutex* mutex2 = NULL, Mutex* mutex3 = NULL):
 159     CMSTokenSync(is_cms_thread),
 160     _locker1(mutex1, Mutex::_no_safepoint_check_flag),
 161     _locker2(mutex2, Mutex::_no_safepoint_check_flag),
 162     _locker3(mutex3, Mutex::_no_safepoint_check_flag)
 163   { }
 164 };
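     // Sketched use (the lock shown is illustrative, not taken from this file):
     //
     //   CMSTokenSyncWithLocks ts(true /* is_cms_thread */, bitMapLock());
     //
     // acquires the CMS token first and then the given lock(s); on scope exit the
     // locks are released in reverse order and the token is released last.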
 165 
 166 
 167 // Wrapper class to temporarily disable icms during a foreground cms collection.
 168 class ICMSDisabler: public StackObj {
 169  public:
 170   // The ctor disables icms and wakes up the thread so it notices the change;
 171   // the dtor re-enables icms.  Note that the CMSCollector methods will check
 172   // CMSIncrementalMode.
 173   ICMSDisabler()  { CMSCollector::disable_icms(); CMSCollector::start_icms(); }
 174   ~ICMSDisabler() { CMSCollector::enable_icms(); }
 175 };
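     // Sketch of intended use (hypothetical): a foreground collection wraps its
     // body in the disabler so icms stays off for the duration:
     //
     //   {
     //     ICMSDisabler disabler;   // icms disabled, CMS thread nudged to notice
     //     ... perform the foreground collection ...
     //   }                          // dtor re-enables icms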
 176 
 177 //////////////////////////////////////////////////////////////////
 178 //  Concurrent Mark-Sweep Generation /////////////////////////////
 179 //////////////////////////////////////////////////////////////////
 180 
 181 NOT_PRODUCT(CompactibleFreeListSpace* debug_cms_space;)
 182 
 183 // This struct contains per-thread things necessary to support parallel
 184 // young-gen collection.
 185 class CMSParGCThreadState: public CHeapObj<mtGC> {
 186  public:
 187   CFLS_LAB lab;
 188   PromotionInfo promo;
 189 
 190   // Constructor.
 191   CMSParGCThreadState(CompactibleFreeListSpace* cfls) : lab(cfls) {
 192     promo.setSpace(cfls);
 193   }
 194 };
 195 
 196 ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
 197      ReservedSpace rs, size_t initial_byte_size, int level,
 198      CardTableRS* ct, bool use_adaptive_freelists,
 199      FreeBlockDictionary<FreeChunk>::DictionaryChoice dictionaryChoice) :
 200   CardGeneration(rs, initial_byte_size, level, ct),
 201   _dilatation_factor(((double)MinChunkSize)/((double)(CollectedHeap::min_fill_size()))),
 202   _debug_collection_type(Concurrent_collection_type),
 203   _did_compact(false)
 204 {
 205   HeapWord* bottom = (HeapWord*) _virtual_space.low();
 206   HeapWord* end    = (HeapWord*) _virtual_space.high();
 207 
 208   _direct_allocated_words = 0;
 209   NOT_PRODUCT(
 210     _numObjectsPromoted = 0;
 211     _numWordsPromoted = 0;
 212     _numObjectsAllocated = 0;
 213     _numWordsAllocated = 0;
 214   )
 215 
 216   _cmsSpace = new CompactibleFreeListSpace(_bts, MemRegion(bottom, end),
 217                                            use_adaptive_freelists,
 218                                            dictionaryChoice);
 219   NOT_PRODUCT(debug_cms_space = _cmsSpace;)
 220   if (_cmsSpace == NULL) {
 221     vm_exit_during_initialization(
 222       "CompactibleFreeListSpace allocation failure");
 223   }
 224   _cmsSpace->_gen = this;
 225 
 226   _gc_stats = new CMSGCStats();
 227 
 228   // Verify the assumption that FreeChunk::_prev and OopDesc::_klass
 229   // offsets match. The ability to tell free chunks from objects
 230   // depends on this property.
 231   debug_only(
 232     FreeChunk* junk = NULL;
 233     assert(UseCompressedKlassPointers ||
 234            junk->prev_addr() == (void*)(oop(junk)->klass_addr()),
 235            "Offset of FreeChunk::_prev within FreeChunk must match"
 236            "  that of OopDesc::_klass within OopDesc");
 237   )
 238   if (CollectedHeap::use_parallel_gc_threads()) {
 239     typedef CMSParGCThreadState* CMSParGCThreadStatePtr;
 240     _par_gc_thread_states =
 241       NEW_C_HEAP_ARRAY(CMSParGCThreadStatePtr, ParallelGCThreads, mtGC);
 242     if (_par_gc_thread_states == NULL) {
 243       vm_exit_during_initialization("Could not allocate par gc structs");
 244     }
 245     for (uint i = 0; i < ParallelGCThreads; i++) {
 246       _par_gc_thread_states[i] = new CMSParGCThreadState(cmsSpace());
 247       if (_par_gc_thread_states[i] == NULL) {
 248         vm_exit_during_initialization("Could not allocate par gc structs");
 249       }
 250     }
 251   } else {
 252     _par_gc_thread_states = NULL;
 253   }
 254   _incremental_collection_failed = false;
 255   // The "dilatation_factor" is the expansion that can occur on
 256   // account of the fact that the minimum object size in the CMS
 257   // generation may be larger than that in, say, a contiguous young
 258   //  generation.
 259   // Ideally, in the calculation below, we'd compute the dilatation
 260   // factor as: MinChunkSize/(promoting_gen's min object size)
 261   // Since we do not have such a general query interface for the
 262   // promoting generation, we'll instead just use the minimum
 263   // object size (which today is a header's worth of space);
 264   // note that all arithmetic is in units of HeapWords.
 265   assert(MinChunkSize >= CollectedHeap::min_fill_size(), "just checking");
 266   assert(_dilatation_factor >= 1.0, "from previous assert");
 267 }
 268 
 269 
 270 // The field "_initiating_occupancy" represents the occupancy percentage
 271 // at which we trigger a new collection cycle.  Unless explicitly specified
 272 // via CMSInitiatingOccupancyFraction (argument "io" below), it
 273 // is calculated by:
 274 //
 275 //   Let "f" be MinHeapFreeRatio in
 276 //
 277 //    _initiating_occupancy = 100-f +
 278 //                           f * (CMSTriggerRatio/100)
 279 //   where CMSTriggerRatio is the argument "tr" below.
 280 //
 281 // That is, if we assume the heap is at its desired maximum occupancy at the
 282 // end of a collection, we let CMSTriggerRatio of the (purported) free
 283 // space be allocated before initiating a new collection cycle.
 284 //
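     // For example, assuming the usual defaults of MinHeapFreeRatio = 40 and
     // CMSTriggerRatio = 80, and no explicit CMSInitiatingOccupancyFraction:
     //
     //   _initiating_occupancy = ((100 - 40) + (80 * 40) / 100.0) / 100.0
     //                         = (60 + 32) / 100.0
     //                         = 0.92
     //
     // i.e. a new cycle is initiated once the generation is about 92% occupied.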
 285 void ConcurrentMarkSweepGeneration::init_initiating_occupancy(intx io, uintx tr) {
 286   assert(io <= 100 && tr <= 100, "Check the arguments");
 287   if (io >= 0) {
 288     _initiating_occupancy = (double)io / 100.0;
 289   } else {
 290     _initiating_occupancy = ((100 - MinHeapFreeRatio) +
 291                              (double)(tr * MinHeapFreeRatio) / 100.0)
 292                             / 100.0;
 293   }
 294 }
 295 
 296 void ConcurrentMarkSweepGeneration::ref_processor_init() {
 297   assert(collector() != NULL, "no collector");
 298   collector()->ref_processor_init();
 299 }
 300 
 301 void CMSCollector::ref_processor_init() {
 302   if (_ref_processor == NULL) {
 303     // Allocate and initialize a reference processor
 304     _ref_processor =
 305       new ReferenceProcessor(_span,                               // span
 306                              (ParallelGCThreads > 1) && ParallelRefProcEnabled, // mt processing
 307                              (int) ParallelGCThreads,             // mt processing degree
 308                              _cmsGen->refs_discovery_is_mt(),     // mt discovery
 309                              (int) MAX2(ConcGCThreads, ParallelGCThreads), // mt discovery degree
 310                              _cmsGen->refs_discovery_is_atomic(), // discovery is not atomic
 311                              &_is_alive_closure,                  // closure for liveness info
 312                              false);                              // next field updates do not need write barrier
 313     // Initialize the _ref_processor field of CMSGen
 314     _cmsGen->set_ref_processor(_ref_processor);
 315 
 316   }
 317 }
 318 
 319 CMSAdaptiveSizePolicy* CMSCollector::size_policy() {
 320   GenCollectedHeap* gch = GenCollectedHeap::heap();
 321   assert(gch->kind() == CollectedHeap::GenCollectedHeap,
 322     "Wrong type of heap");
 323   CMSAdaptiveSizePolicy* sp = (CMSAdaptiveSizePolicy*)
 324     gch->gen_policy()->size_policy();
 325   assert(sp->is_gc_cms_adaptive_size_policy(),
 326     "Wrong type of size policy");
 327   return sp;
 328 }
 329 
 330 CMSGCAdaptivePolicyCounters* CMSCollector::gc_adaptive_policy_counters() {
 331   CMSGCAdaptivePolicyCounters* results =
 332     (CMSGCAdaptivePolicyCounters*) collector_policy()->counters();
 333   assert(
 334     results->kind() == GCPolicyCounters::CMSGCAdaptivePolicyCountersKind,
 335     "Wrong gc policy counter kind");
 336   return results;
 337 }
 338 
 339 
 340 void ConcurrentMarkSweepGeneration::initialize_performance_counters() {
 341 
 342   const char* gen_name = "old";
 343 
 344   // Generation Counters - generation 1, 1 subspace
 345   _gen_counters = new GenerationCounters(gen_name, 1, 1, &_virtual_space);
 346 
 347   _space_counters = new GSpaceCounters(gen_name, 0,
 348                                        _virtual_space.reserved_size(),
 349                                        this, _gen_counters);
 350 }
 351 
 352 CMSStats::CMSStats(ConcurrentMarkSweepGeneration* cms_gen, unsigned int alpha):
 353   _cms_gen(cms_gen)
 354 {
 355   assert(alpha <= 100, "bad value");
 356   _saved_alpha = alpha;
 357 
 358   // Initialize the alphas to the bootstrap value of 100.
 359   _gc0_alpha = _cms_alpha = 100;
 360 
 361   _cms_begin_time.update();
 362   _cms_end_time.update();
 363 
 364   _gc0_duration = 0.0;
 365   _gc0_period = 0.0;
 366   _gc0_promoted = 0;
 367 
 368   _cms_duration = 0.0;
 369   _cms_period = 0.0;
 370   _cms_allocated = 0;
 371 
 372   _cms_used_at_gc0_begin = 0;
 373   _cms_used_at_gc0_end = 0;
 374   _allow_duty_cycle_reduction = false;
 375   _valid_bits = 0;
 376   _icms_duty_cycle = CMSIncrementalDutyCycle;
 377 }
 378 
 379 double CMSStats::cms_free_adjustment_factor(size_t free) const {
 380   // TBD: CR 6909490
 381   return 1.0;
 382 }
 383 
 384 void CMSStats::adjust_cms_free_adjustment_factor(bool fail, size_t free) {
 385 }
 386 
 387 // If promotion failure handling is on, use
 388 // the padded average size of the promotion for each
 389 // young generation collection.
 390 double CMSStats::time_until_cms_gen_full() const {
 391   size_t cms_free = _cms_gen->cmsSpace()->free();
 392   GenCollectedHeap* gch = GenCollectedHeap::heap();
 393   size_t expected_promotion = MIN2(gch->get_gen(0)->capacity(),
 394                                    (size_t) _cms_gen->gc_stats()->avg_promoted()->padded_average());
 395   if (cms_free > expected_promotion) {
 396     // Start a cms collection if there isn't enough space to promote
 397     // for the next minor collection.  Use the padded average as
 398     // a safety factor.
 399     cms_free -= expected_promotion;
 400 
 401     // Adjust by the safety factor.
 402     double cms_free_dbl = (double)cms_free;
 403     double cms_adjustment = (100.0 - CMSIncrementalSafetyFactor)/100.0;
 404     // Apply a further correction factor which tries to adjust
 405     // for recent occurrence of concurrent mode failures.
 406     cms_adjustment = cms_adjustment * cms_free_adjustment_factor(cms_free);
 407     cms_free_dbl = cms_free_dbl * cms_adjustment;
 408 
 409     if (PrintGCDetails && Verbose) {
 410       gclog_or_tty->print_cr("CMSStats::time_until_cms_gen_full: cms_free "
 411         SIZE_FORMAT " expected_promotion " SIZE_FORMAT,
 412         cms_free, expected_promotion);
 413       gclog_or_tty->print_cr("  cms_free_dbl %f cms_consumption_rate %f",
 414         cms_free_dbl, cms_consumption_rate() + 1.0);
 415     }
 416     // Add 1 in case the consumption rate goes to zero.
 417     return cms_free_dbl / (cms_consumption_rate() + 1.0);
 418   }
 419   return 0.0;
 420 }
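     // A rough worked example of the estimate above (numbers are illustrative and
     // assume CMSIncrementalSafetyFactor at its default of 10, with
     // cms_free_adjustment_factor() currently returning 1.0): with 100M of cms
     // free space and an expected padded promotion of 20M,
     //   cms_free       = 100M - 20M        = 80M
     //   cms_adjustment = (100 - 10) / 100  = 0.9
     //   cms_free_dbl   = 80M * 0.9         = 72M
     // and the returned estimate is 72M / (cms_consumption_rate() + 1.0) seconds.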
 421 
 422 // Compare the duration of the cms collection to the
 423 // time remaining before the cms generation is full.
 424 // Note that the time from the start of the cms collection
 425 // to the start of the cms sweep (less than the total
 426 // duration of the cms collection) could be used instead.
 427 // This was tried, but some applications experienced
 428 // promotion failures early in execution, possibly
 429 // because the averages were not yet accurate
 430 // enough at the beginning of the run.
 431 double CMSStats::time_until_cms_start() const {
 432   // We add "gc0_period" to the "work" calculation
 433   // below because this query is done (mostly) at the
 434   // end of a scavenge, so we need to conservatively
 435   // account for that much possible delay
 436   // in the query so as to avoid concurrent mode failures
 437   // due to starting the collection just a wee bit too
 438   // late.
 439   double work = cms_duration() + gc0_period();
 440   double deadline = time_until_cms_gen_full();
 441   // If a concurrent mode failure occurred recently, we want to be
 442   // more conservative and halve our expected time_until_cms_gen_full()
 443   if (work > deadline) {
 444     if (Verbose && PrintGCDetails) {
 445       gclog_or_tty->print(
 446         " CMSCollector: collect because of anticipated promotion "
 447         "before full %3.7f + %3.7f > %3.7f ", cms_duration(),
 448         gc0_period(), time_until_cms_gen_full());
 449     }
 450     return 0.0;
 451   }
 452   return deadline - work;
 453 }
 454 
 455 // Return a duty cycle based on old_duty_cycle and new_duty_cycle, limiting the
 456 // amount of change to prevent wild oscillation.
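     // For example, a requested drop from old_duty_cycle = 60 to new_duty_cycle = 10
     // is limited: largest_delta = MAX2(60 / 4, 5U) = 15 and 10 + 15 < 60, so the
     // damped result is 60 - 15 = 45 rather than 10.  An increase from 20 to 90 is
     // likewise capped at 20 + MAX2(20 / 4, 15U) = 35.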
 457 unsigned int CMSStats::icms_damped_duty_cycle(unsigned int old_duty_cycle,
 458                                               unsigned int new_duty_cycle) {
 459   assert(old_duty_cycle <= 100, "bad input value");
 460   assert(new_duty_cycle <= 100, "bad input value");
 461 
 462   // Note:  use subtraction with caution since it may underflow (values are
 463   // unsigned).  Addition is safe since we're in the range 0-100.
 464   unsigned int damped_duty_cycle = new_duty_cycle;
 465   if (new_duty_cycle < old_duty_cycle) {
 466     const unsigned int largest_delta = MAX2(old_duty_cycle / 4, 5U);
 467     if (new_duty_cycle + largest_delta < old_duty_cycle) {
 468       damped_duty_cycle = old_duty_cycle - largest_delta;
 469     }
 470   } else if (new_duty_cycle > old_duty_cycle) {
 471     const unsigned int largest_delta = MAX2(old_duty_cycle / 4, 15U);
 472     if (new_duty_cycle > old_duty_cycle + largest_delta) {
 473       damped_duty_cycle = MIN2(old_duty_cycle + largest_delta, 100U);
 474     }
 475   }
 476   assert(damped_duty_cycle <= 100, "invalid duty cycle computed");
 477 
 478   if (CMSTraceIncrementalPacing) {
 479     gclog_or_tty->print(" [icms_damped_duty_cycle(%d,%d) = %d] ",
 480                            old_duty_cycle, new_duty_cycle, damped_duty_cycle);
 481   }
 482   return damped_duty_cycle;
 483 }
 484 
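     // Rough intuition for the calculation below (illustrative numbers): if the
     // scaled estimate of remaining concurrent work is 2 seconds and
     // time_until_cms_gen_full() is 10 seconds, then
     //   duty_cycle_dbl = 100.0 * 2 / 10 = 20
     // i.e. incremental mode aims to run the CMS thread roughly 20% of the time
     // so that the cycle finishes before the generation fills up.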
 485 unsigned int CMSStats::icms_update_duty_cycle_impl() {
 486   assert(CMSIncrementalPacing && valid(),
 487          "should be handled in icms_update_duty_cycle()");
 488 
 489   double cms_time_so_far = cms_timer().seconds();
 490   double scaled_duration = cms_duration_per_mb() * _cms_used_at_gc0_end / M;
 491   double scaled_duration_remaining = fabsd(scaled_duration - cms_time_so_far);
 492 
 493   // Avoid division by 0.
 494   double time_until_full = MAX2(time_until_cms_gen_full(), 0.01);
 495   double duty_cycle_dbl = 100.0 * scaled_duration_remaining / time_until_full;
 496 
 497   unsigned int new_duty_cycle = MIN2((unsigned int)duty_cycle_dbl, 100U);
 498   if (new_duty_cycle > _icms_duty_cycle) {
 499     // Avoid very small duty cycles (1 or 2); 0 is allowed.
 500     if (new_duty_cycle > 2) {
 501       _icms_duty_cycle = icms_damped_duty_cycle(_icms_duty_cycle,
 502                                                 new_duty_cycle);
 503     }
 504   } else if (_allow_duty_cycle_reduction) {
 505     // The duty cycle is reduced only once per cms cycle (see record_cms_end()).
 506     new_duty_cycle = icms_damped_duty_cycle(_icms_duty_cycle, new_duty_cycle);
 507     // Respect the minimum duty cycle.
 508     unsigned int min_duty_cycle = (unsigned int)CMSIncrementalDutyCycleMin;
 509     _icms_duty_cycle = MAX2(new_duty_cycle, min_duty_cycle);
 510   }
 511 
 512   if (PrintGCDetails || CMSTraceIncrementalPacing) {
 513     gclog_or_tty->print(" icms_dc=%d ", _icms_duty_cycle);
 514   }
 515 
 516   _allow_duty_cycle_reduction = false;
 517   return _icms_duty_cycle;
 518 }
 519 
 520 #ifndef PRODUCT
 521 void CMSStats::print_on(outputStream *st) const {
 522   st->print(" gc0_alpha=%d,cms_alpha=%d", _gc0_alpha, _cms_alpha);
 523   st->print(",gc0_dur=%g,gc0_per=%g,gc0_promo=" SIZE_FORMAT,
 524                gc0_duration(), gc0_period(), gc0_promoted());
 525   st->print(",cms_dur=%g,cms_dur_per_mb=%g,cms_per=%g,cms_alloc=" SIZE_FORMAT,
 526             cms_duration(), cms_duration_per_mb(),
 527             cms_period(), cms_allocated());
 528   st->print(",cms_since_beg=%g,cms_since_end=%g",
 529             cms_time_since_begin(), cms_time_since_end());
 530   st->print(",cms_used_beg=" SIZE_FORMAT ",cms_used_end=" SIZE_FORMAT,
 531             _cms_used_at_gc0_begin, _cms_used_at_gc0_end);
 532   if (CMSIncrementalMode) {
 533     st->print(",dc=%d", icms_duty_cycle());
 534   }
 535 
 536   if (valid()) {
 537     st->print(",promo_rate=%g,cms_alloc_rate=%g",
 538               promotion_rate(), cms_allocation_rate());
 539     st->print(",cms_consumption_rate=%g,time_until_full=%g",
 540               cms_consumption_rate(), time_until_cms_gen_full());
 541   }
 542   st->print(" ");
 543 }
 544 #endif // #ifndef PRODUCT
 545 
 546 CMSCollector::CollectorState CMSCollector::_collectorState =
 547                              CMSCollector::Idling;
 548 bool CMSCollector::_foregroundGCIsActive = false;
 549 bool CMSCollector::_foregroundGCShouldWait = false;
 550 
 551 CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
 552                            CardTableRS*                   ct,
 553                            ConcurrentMarkSweepPolicy*     cp):
 554   _cmsGen(cmsGen),
 555   _ct(ct),
 556   _ref_processor(NULL),    // will be set later
 557   _conc_workers(NULL),     // may be set later
 558   _abort_preclean(false),
 559   _start_sampling(false),
 560   _between_prologue_and_epilogue(false),
 561   _markBitMap(0, Mutex::leaf + 1, "CMS_markBitMap_lock"),
 562   _modUnionTable((CardTableModRefBS::card_shift - LogHeapWordSize),
 563                  -1 /* lock-free */, "No_lock" /* dummy */),
 564   _modUnionClosure(&_modUnionTable),
 565   _modUnionClosurePar(&_modUnionTable),
 566   // Adjust my span to cover old (cms) gen
 567   _span(cmsGen->reserved()),
 568   // Construct the is_alive_closure with _span & markBitMap
 569   _is_alive_closure(_span, &_markBitMap),
 570   _restart_addr(NULL),
 571   _overflow_list(NULL),
 572   _stats(cmsGen),
 573   _eden_chunk_lock(new Mutex(Mutex::leaf + 1, "CMS_eden_chunk_lock", true)),
 574   _eden_chunk_array(NULL),     // may be set in ctor body
 575   _eden_chunk_capacity(0),     // -- ditto --
 576   _eden_chunk_index(0),        // -- ditto --
 577   _survivor_plab_array(NULL),  // -- ditto --
 578   _survivor_chunk_array(NULL), // -- ditto --
 579   _survivor_chunk_capacity(0), // -- ditto --
 580   _survivor_chunk_index(0),    // -- ditto --
 581   _ser_pmc_preclean_ovflw(0),
 582   _ser_kac_preclean_ovflw(0),
 583   _ser_pmc_remark_ovflw(0),
 584   _par_pmc_remark_ovflw(0),
 585   _ser_kac_ovflw(0),
 586   _par_kac_ovflw(0),
 587 #ifndef PRODUCT
 588   _num_par_pushes(0),
 589 #endif
 590   _collection_count_start(0),
 591   _verifying(false),
 592   _icms_start_limit(NULL),
 593   _icms_stop_limit(NULL),
 594   _verification_mark_bm(0, Mutex::leaf + 1, "CMS_verification_mark_bm_lock"),
 595   _completed_initialization(false),
 596   _collector_policy(cp),
 597   _should_unload_classes(false),
 598   _concurrent_cycles_since_last_unload(0),
 599   _roots_scanning_options(0),
 600   _inter_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
 601   _intra_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
 602   _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) CMSTracer()),
 603   _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
 604   _cms_start_registered(false)
 605 {
 606   if (ExplicitGCInvokesConcurrentAndUnloadsClasses) {
 607     ExplicitGCInvokesConcurrent = true;
 608   }
 609   // Now expand the span and allocate the collection support structures
 610   // (MUT, marking bit map etc.) to cover both generations subject to
 611   // collection.
 612 
 613   // For use by dirty card to oop closures.
 614   _cmsGen->cmsSpace()->set_collector(this);
 615 
 616   // Allocate MUT and marking bit map
 617   {
 618     MutexLockerEx x(_markBitMap.lock(), Mutex::_no_safepoint_check_flag);
 619     if (!_markBitMap.allocate(_span)) {
 620       warning("Failed to allocate CMS Bit Map");
 621       return;
 622     }
 623     assert(_markBitMap.covers(_span), "_markBitMap inconsistency?");
 624   }
 625   {
 626     _modUnionTable.allocate(_span);
 627     assert(_modUnionTable.covers(_span), "_modUnionTable inconsistency?");
 628   }
 629 
 630   if (!_markStack.allocate(MarkStackSize)) {
 631     warning("Failed to allocate CMS Marking Stack");
 632     return;
 633   }
 634 
 635   // Support for multi-threaded concurrent phases
 636   if (CMSConcurrentMTEnabled) {
 637     if (FLAG_IS_DEFAULT(ConcGCThreads)) {
 638       // just for now
 639       FLAG_SET_DEFAULT(ConcGCThreads, (ParallelGCThreads + 3)/4);
 640     }
 641     if (ConcGCThreads > 1) {
 642       _conc_workers = new YieldingFlexibleWorkGang("Parallel CMS Threads",
 643                                  ConcGCThreads, true);
 644       if (_conc_workers == NULL) {
 645         warning("GC/CMS: _conc_workers allocation failure: "
 646               "forcing -CMSConcurrentMTEnabled");
 647         CMSConcurrentMTEnabled = false;
 648       } else {
 649         _conc_workers->initialize_workers();
 650       }
 651     } else {
 652       CMSConcurrentMTEnabled = false;
 653     }
 654   }
 655   if (!CMSConcurrentMTEnabled) {
 656     ConcGCThreads = 0;
 657   } else {
 658     // Turn off CMSCleanOnEnter optimization temporarily for
 659     // the MT case where it's not fixed yet; see 6178663.
 660     CMSCleanOnEnter = false;
 661   }
 662   assert((_conc_workers != NULL) == (ConcGCThreads > 1),
 663          "Inconsistency");
 664 
 665   // Parallel task queues; these are shared for the
 666   // concurrent and stop-world phases of CMS, but
 667   // are not shared with parallel scavenge (ParNew).
 668   {
 669     uint i;
 670     uint num_queues = (uint) MAX2(ParallelGCThreads, ConcGCThreads);
 671 
 672     if ((CMSParallelRemarkEnabled || CMSConcurrentMTEnabled
 673          || ParallelRefProcEnabled)
 674         && num_queues > 0) {
 675       _task_queues = new OopTaskQueueSet(num_queues);
 676       if (_task_queues == NULL) {
 677         warning("task_queues allocation failure.");
 678         return;
 679       }
 680       _hash_seed = NEW_C_HEAP_ARRAY(int, num_queues, mtGC);
 681       if (_hash_seed == NULL) {
 682         warning("_hash_seed array allocation failure");
 683         return;
 684       }
 685 
 686       typedef Padded<OopTaskQueue> PaddedOopTaskQueue;
 687       for (i = 0; i < num_queues; i++) {
 688         PaddedOopTaskQueue *q = new PaddedOopTaskQueue();
 689         if (q == NULL) {
 690           warning("work_queue allocation failure.");
 691           return;
 692         }
 693         _task_queues->register_queue(i, q);
 694       }
 695       for (i = 0; i < num_queues; i++) {
 696         _task_queues->queue(i)->initialize();
 697         _hash_seed[i] = 17;  // copied from ParNew
 698       }
 699     }
 700   }
 701 
 702   _cmsGen->init_initiating_occupancy(CMSInitiatingOccupancyFraction, CMSTriggerRatio);
 703 
 704   // Clip CMSBootstrapOccupancy between 0 and 100.
 705   _bootstrap_occupancy = ((double)CMSBootstrapOccupancy)/(double)100;
 706 
 707   _full_gcs_since_conc_gc = 0;
 708 
 709   // Now tell CMS generations the identity of their collector
 710   ConcurrentMarkSweepGeneration::set_collector(this);
 711 
 712   // Create & start a CMS thread for this CMS collector
 713   _cmsThread = ConcurrentMarkSweepThread::start(this);
 714   assert(cmsThread() != NULL, "CMS Thread should have been created");
 715   assert(cmsThread()->collector() == this,
 716          "CMS Thread should refer to this gen");
 717   assert(CGC_lock != NULL, "Where's the CGC_lock?");
 718 
 719   // Support for parallelizing young gen rescan
 720   GenCollectedHeap* gch = GenCollectedHeap::heap();
 721   _young_gen = gch->prev_gen(_cmsGen);
 722   if (gch->supports_inline_contig_alloc()) {
 723     _top_addr = gch->top_addr();
 724     _end_addr = gch->end_addr();
 725     assert(_young_gen != NULL, "no _young_gen");
 726     _eden_chunk_index = 0;
 727     _eden_chunk_capacity = (_young_gen->max_capacity()+CMSSamplingGrain)/CMSSamplingGrain;
 728     _eden_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, _eden_chunk_capacity, mtGC);
 729     if (_eden_chunk_array == NULL) {
 730       _eden_chunk_capacity = 0;
 731       warning("GC/CMS: _eden_chunk_array allocation failure");
 732     }
 733   }
 734   assert(_eden_chunk_array != NULL || _eden_chunk_capacity == 0, "Error");
 735 
 736   // Support for parallelizing survivor space rescan
 737   if ((CMSParallelRemarkEnabled && CMSParallelSurvivorRemarkEnabled) || CMSParallelInitialMarkEnabled) {
 738     const size_t max_plab_samples =
 739       ((DefNewGeneration*)_young_gen)->max_survivor_size()/MinTLABSize;
 740 
 741     _survivor_plab_array  = NEW_C_HEAP_ARRAY(ChunkArray, ParallelGCThreads, mtGC);
 742     _survivor_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, 2*max_plab_samples, mtGC);
 743     _cursor               = NEW_C_HEAP_ARRAY(size_t, ParallelGCThreads, mtGC);
 744     if (_survivor_plab_array == NULL || _survivor_chunk_array == NULL
 745         || _cursor == NULL) {
 746       warning("Failed to allocate survivor plab/chunk array");
 747       if (_survivor_plab_array  != NULL) {
 748         FREE_C_HEAP_ARRAY(ChunkArray, _survivor_plab_array, mtGC);
 749         _survivor_plab_array = NULL;
 750       }
 751       if (_survivor_chunk_array != NULL) {
 752         FREE_C_HEAP_ARRAY(HeapWord*, _survivor_chunk_array, mtGC);
 753         _survivor_chunk_array = NULL;
 754       }
 755       if (_cursor != NULL) {
 756         FREE_C_HEAP_ARRAY(size_t, _cursor, mtGC);
 757         _cursor = NULL;
 758       }
 759     } else {
 760       _survivor_chunk_capacity = 2*max_plab_samples;
 761       for (uint i = 0; i < ParallelGCThreads; i++) {
 762         HeapWord** vec = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples, mtGC);
 763         if (vec == NULL) {
 764           warning("Failed to allocate survivor plab array");
 765           for (int j = i; j > 0; j--) {
 766             FREE_C_HEAP_ARRAY(HeapWord*, _survivor_plab_array[j-1].array(), mtGC);
 767           }
 768           FREE_C_HEAP_ARRAY(ChunkArray, _survivor_plab_array, mtGC);
 769           FREE_C_HEAP_ARRAY(HeapWord*, _survivor_chunk_array, mtGC);
 770           _survivor_plab_array = NULL;
 771           _survivor_chunk_array = NULL;
 772           _survivor_chunk_capacity = 0;
 773           break;
 774         } else {
 775           ChunkArray* cur =
 776             ::new (&_survivor_plab_array[i]) ChunkArray(vec,
 777                                                         max_plab_samples);
 778           assert(cur->end() == 0, "Should be 0");
 779           assert(cur->array() == vec, "Should be vec");
 780           assert(cur->capacity() == max_plab_samples, "Error");
 781         }
 782       }
 783     }
 784   }
 785   assert(   (   _survivor_plab_array  != NULL
 786              && _survivor_chunk_array != NULL)
 787          || (   _survivor_chunk_capacity == 0
 788              && _survivor_chunk_index == 0),
 789          "Error");
 790 
 791   // Choose what strong roots should be scanned depending on verification options
 792   if (!CMSClassUnloadingEnabled) {
 793     // If class unloading is disabled we want to include all classes into the root set.
 794     add_root_scanning_option(SharedHeap::SO_AllClasses);
 795   } else {
 796     add_root_scanning_option(SharedHeap::SO_SystemClasses);
 797   }
 798 
 799   NOT_PRODUCT(_overflow_counter = CMSMarkStackOverflowInterval;)
 800   _gc_counters = new CollectorCounters("CMS", 1);
 801   _completed_initialization = true;
 802   _inter_sweep_timer.start();  // start of time
 803 }
 804 
 805 const char* ConcurrentMarkSweepGeneration::name() const {
 806   return "concurrent mark-sweep generation";
 807 }
 808 void ConcurrentMarkSweepGeneration::update_counters() {
 809   if (UsePerfData) {
 810     _space_counters->update_all();
 811     _gen_counters->update_all();
 812   }
 813 }
 814 
 815 // this is an optimized version of update_counters(). it takes the
 816 // used value as a parameter rather than computing it.
 817 //
 818 void ConcurrentMarkSweepGeneration::update_counters(size_t used) {
 819   if (UsePerfData) {
 820     _space_counters->update_used(used);
 821     _space_counters->update_capacity();
 822     _gen_counters->update_all();
 823   }
 824 }
 825 
 826 void ConcurrentMarkSweepGeneration::print() const {
 827   Generation::print();
 828   cmsSpace()->print();
 829 }
 830 
 831 #ifndef PRODUCT
 832 void ConcurrentMarkSweepGeneration::print_statistics() {
 833   cmsSpace()->printFLCensus(0);
 834 }
 835 #endif
 836 
 837 void ConcurrentMarkSweepGeneration::printOccupancy(const char *s) {
 838   GenCollectedHeap* gch = GenCollectedHeap::heap();
 839   if (PrintGCDetails) {
 840     if (Verbose) {
 841       gclog_or_tty->print("[%d %s-%s: "SIZE_FORMAT"("SIZE_FORMAT")]",
 842         level(), short_name(), s, used(), capacity());
 843     } else {
 844       gclog_or_tty->print("[%d %s-%s: "SIZE_FORMAT"K("SIZE_FORMAT"K)]",
 845         level(), short_name(), s, used() / K, capacity() / K);
 846     }
 847   }
 848   if (Verbose) {
 849     gclog_or_tty->print(" "SIZE_FORMAT"("SIZE_FORMAT")",
 850               gch->used(), gch->capacity());
 851   } else {
 852     gclog_or_tty->print(" "SIZE_FORMAT"K("SIZE_FORMAT"K)",
 853               gch->used() / K, gch->capacity() / K);
 854   }
 855 }
 856 
 857 size_t
 858 ConcurrentMarkSweepGeneration::contiguous_available() const {
 859   // dld proposes an improvement in precision here. If the committed
 860   // part of the space ends in a free block we should add that to
 861   // uncommitted size in the calculation below. Will make this
 862   // change later, staying with the approximation below for the
 863   // time being. -- ysr.
 864   return MAX2(_virtual_space.uncommitted_size(), unsafe_max_alloc_nogc());
 865 }
 866 
 867 size_t
 868 ConcurrentMarkSweepGeneration::unsafe_max_alloc_nogc() const {
 869   return _cmsSpace->max_alloc_in_words() * HeapWordSize;
 870 }
 871 
 872 size_t ConcurrentMarkSweepGeneration::max_available() const {
 873   return free() + _virtual_space.uncommitted_size();
 874 }
 875 
 876 bool ConcurrentMarkSweepGeneration::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
 877   size_t available = max_available();
 878   size_t av_promo  = (size_t)gc_stats()->avg_promoted()->padded_average();
 879   bool   res = (available >= av_promo) || (available >= max_promotion_in_bytes);
 880   if (Verbose && PrintGCDetails) {
 881     gclog_or_tty->print_cr(
 882       "CMS: promo attempt is%s safe: available("SIZE_FORMAT") %s av_promo("SIZE_FORMAT"),"
 883       "max_promo("SIZE_FORMAT")",
 884       res? "":" not", available, res? ">=":"<",
 885       av_promo, max_promotion_in_bytes);
 886   }
 887   return res;
 888 }
 889 
 890 // At a promotion failure, dump information on block layout in the heap
 891 // (cms old generation).
 892 void ConcurrentMarkSweepGeneration::promotion_failure_occurred() {
 893   if (CMSDumpAtPromotionFailure) {
 894     cmsSpace()->dump_at_safepoint_with_locks(collector(), gclog_or_tty);
 895   }
 896 }
 897 
 898 CompactibleSpace*
 899 ConcurrentMarkSweepGeneration::first_compaction_space() const {
 900   return _cmsSpace;
 901 }
 902 
 903 void ConcurrentMarkSweepGeneration::reset_after_compaction() {
 904   // Clear the promotion information.  These pointers can be adjusted
 905   // along with all the other pointers into the heap, but
 906   // compaction is expected to be a rare event with
 907   // a heap using cms, so don't do it without seeing the need.
 908   if (CollectedHeap::use_parallel_gc_threads()) {
 909     for (uint i = 0; i < ParallelGCThreads; i++) {
 910       _par_gc_thread_states[i]->promo.reset();
 911     }
 912   }
 913 }
 914 
 915 void ConcurrentMarkSweepGeneration::space_iterate(SpaceClosure* blk, bool usedOnly) {
 916   blk->do_space(_cmsSpace);
 917 }
 918 
 919 void ConcurrentMarkSweepGeneration::compute_new_size() {
 920   assert_locked_or_safepoint(Heap_lock);
 921 
 922   // If incremental collection failed, we just want to expand
 923   // to the limit.
 924   if (incremental_collection_failed()) {
 925     clear_incremental_collection_failed();
 926     grow_to_reserved();
 927     return;
 928   }
 929 
 930   // The heap has been compacted but not reset yet.
 931   // Any metric such as free() or used() will be incorrect.
 932 
 933   CardGeneration::compute_new_size();
 934 
 935   // Reset again after a possible resizing
 936   if (did_compact()) {
 937     cmsSpace()->reset_after_compaction();
 938   }
 939 }
 940 
 941 void ConcurrentMarkSweepGeneration::compute_new_size_free_list() {
 942   assert_locked_or_safepoint(Heap_lock);
 943 
 944   // If incremental collection failed, we just want to expand
 945   // to the limit.
 946   if (incremental_collection_failed()) {
 947     clear_incremental_collection_failed();
 948     grow_to_reserved();
 949     return;
 950   }
 951 
 952   double free_percentage = ((double) free()) / capacity();
 953   double desired_free_percentage = (double) MinHeapFreeRatio / 100;
 954   double maximum_free_percentage = (double) MaxHeapFreeRatio / 100;
 955 
 956   // compute expansion delta needed for reaching desired free percentage
 957   if (free_percentage < desired_free_percentage) {
 958     size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
 959     assert(desired_capacity >= capacity(), "invalid expansion size");
 960     size_t expand_bytes = MAX2(desired_capacity - capacity(), MinHeapDeltaBytes);
 961     if (PrintGCDetails && Verbose) {
 963       gclog_or_tty->print_cr("\nFrom compute_new_size: ");
 964       gclog_or_tty->print_cr("  Free fraction %f", free_percentage);
 965       gclog_or_tty->print_cr("  Desired free fraction %f",
 966         desired_free_percentage);
 967       gclog_or_tty->print_cr("  Maximum free fraction %f",
 968         maximum_free_percentage);
 969       gclog_or_tty->print_cr("  Capacity "SIZE_FORMAT, capacity()/1000);
 970       gclog_or_tty->print_cr("  Desired capacity "SIZE_FORMAT,
 971         desired_capacity/1000);
 972       int prev_level = level() - 1;
 973       if (prev_level >= 0) {
 974         size_t prev_size = 0;
 975         GenCollectedHeap* gch = GenCollectedHeap::heap();
 976         Generation* prev_gen = gch->_gens[prev_level];
 977         prev_size = prev_gen->capacity();
 978           gclog_or_tty->print_cr("  Younger gen size "SIZE_FORMAT,
 979                                  prev_size/1000);
 980       }
 981       gclog_or_tty->print_cr("  unsafe_max_alloc_nogc "SIZE_FORMAT,
 982         unsafe_max_alloc_nogc()/1000);
 983       gclog_or_tty->print_cr("  contiguous available "SIZE_FORMAT,
 984         contiguous_available()/1000);
 985       gclog_or_tty->print_cr("  Expand by "SIZE_FORMAT" (bytes)",
 986         expand_bytes);
 987     }
 988     // safe if expansion fails
 989     expand(expand_bytes, 0, CMSExpansionCause::_satisfy_free_ratio);
 990     if (PrintGCDetails && Verbose) {
 991       gclog_or_tty->print_cr("  Expanded free fraction %f",
 992         ((double) free()) / capacity());
 993     }
 994   } else {
 995     size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
 996     assert(desired_capacity <= capacity(), "invalid expansion size");
 997     size_t shrink_bytes = capacity() - desired_capacity;
 998     // Don't shrink unless the delta is greater than the minimum shrink we want
 999     if (shrink_bytes >= MinHeapDeltaBytes) {
1000       shrink_free_list_by(shrink_bytes);
1001     }
1002   }
1003 }
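     // Worked example of the expansion arm above (illustrative, assuming
     // MinHeapFreeRatio = 40): with used() = 600M and capacity() = 800M the free
     // fraction is 0.25 < 0.40, so
     //   desired_capacity = 600M / (1 - 0.40) = 1000M
     //   expand_bytes     = MAX2(1000M - 800M, MinHeapDeltaBytes) = 200M
     // and the generation is asked to expand by 200M.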
1004 
1005 Mutex* ConcurrentMarkSweepGeneration::freelistLock() const {
1006   return cmsSpace()->freelistLock();
1007 }
1008 
1009 HeapWord* ConcurrentMarkSweepGeneration::allocate(size_t size,
1010                                                   bool   tlab) {
1011   CMSSynchronousYieldRequest yr;
1012   MutexLockerEx x(freelistLock(),
1013                   Mutex::_no_safepoint_check_flag);
1014   return have_lock_and_allocate(size, tlab);
1015 }
1016 
1017 HeapWord* ConcurrentMarkSweepGeneration::have_lock_and_allocate(size_t size,
1018                                                   bool   tlab /* ignored */) {
1019   assert_lock_strong(freelistLock());
1020   size_t adjustedSize = CompactibleFreeListSpace::adjustObjectSize(size);
1021   HeapWord* res = cmsSpace()->allocate(adjustedSize);
1022   // Allocate the object live (grey) if the background collector has
1023   // started marking. This is necessary because the marker may
1024   // have passed this address and consequently this object will
1025   // not otherwise be greyed and would be incorrectly swept up.
1026   // Note that if this object contains references, the writing
1027   // of those references will dirty the card containing this object
1028   // allowing the object to be blackened (and its references scanned)
1029   // either during a preclean phase or at the final checkpoint.
1030   if (res != NULL) {
1031     // We may block here with an uninitialized object with
1032     // its mark-bit or P-bits not yet set. Such objects need
1033     // to be safely navigable by block_start().
1034     assert(oop(res)->klass_or_null() == NULL, "Object should be uninitialized here.");
1035     assert(!((FreeChunk*)res)->is_free(), "Error, block will look free but show wrong size");
1036     collector()->direct_allocated(res, adjustedSize);
1037     _direct_allocated_words += adjustedSize;
1038     // allocation counters
1039     NOT_PRODUCT(
1040       _numObjectsAllocated++;
1041       _numWordsAllocated += (int)adjustedSize;
1042     )
1043   }
1044   return res;
1045 }
1046 
1047 // In the case of direct allocation by mutators in a generation that
1048 // is being concurrently collected, the object must be allocated
1049 // live (grey) if the background collector has started marking.
1050 // This is necessary because the marker may
1051 // have passed this address and consequently this object will
1052 // not otherwise be greyed and would be incorrectly swept up.
1053 // Note that if this object contains references, the writing
1054 // of those references will dirty the card containing this object
1055 // allowing the object to be blackened (and its references scanned)
1056 // either during a preclean phase or at the final checkpoint.
1057 void CMSCollector::direct_allocated(HeapWord* start, size_t size) {
1058   assert(_markBitMap.covers(start, size), "Out of bounds");
1059   if (_collectorState >= Marking) {
1060     MutexLockerEx y(_markBitMap.lock(),
1061                     Mutex::_no_safepoint_check_flag);
1062     // [see comments preceding SweepClosure::do_blk() below for details]
1063     //
1064     // Can the P-bits be deleted now?  JJJ
1065     //
1066     // 1. need to mark the object as live so it isn't collected
1067     // 2. need to mark the 2nd bit to indicate the object may be uninitialized
1068     // 3. need to mark the end of the object so marking, precleaning or sweeping
1069     //    can skip over uninitialized or unparsable objects. An allocated
1070     //    object is considered uninitialized for our purposes as long as
1071     //    its klass word is NULL.  All old gen objects are parsable
1072     //    as soon as they are initialized.
1073     _markBitMap.mark(start);          // object is live
1074     _markBitMap.mark(start + 1);      // object is potentially uninitialized?
1075     _markBitMap.mark(start + size - 1);
1076                                       // mark end of object
1077   }
1078   // check that oop looks uninitialized
1079   assert(oop(start)->klass_or_null() == NULL, "_klass should be NULL");
1080 }
1081 
1082 void CMSCollector::promoted(bool par, HeapWord* start,
1083                             bool is_obj_array, size_t obj_size) {
1084   assert(_markBitMap.covers(start), "Out of bounds");
1085   // See comment in direct_allocated() about when objects should
1086   // be allocated live.
1087   if (_collectorState >= Marking) {
1088     // we already hold the marking bit map lock, taken in
1089     // the prologue
1090     if (par) {
1091       _markBitMap.par_mark(start);
1092     } else {
1093       _markBitMap.mark(start);
1094     }
1095     // We don't need to mark the object as uninitialized (as
1096     // in direct_allocated above) because this is being done with the
1097     // world stopped and the object will be initialized by the
1098     // time the marking, precleaning or sweeping get to look at it.
1099     // But see the code for copying objects into the CMS generation,
1100     // where we need to ensure that concurrent readers of the
1101     // block offset table are able to safely navigate a block that
1102     // is in flux from being free to being allocated (and in
1103     // transition while being copied into) and subsequently
1104     // becoming a bona-fide object when the copy/promotion is complete.
1105     assert(SafepointSynchronize::is_at_safepoint(),
1106            "expect promotion only at safepoints");
1107 
1108     if (_collectorState < Sweeping) {
1109       // Mark the appropriate cards in the modUnionTable, so that
1110       // this object gets scanned before the sweep. If this is
1111       // not done, CMS generation references in the object might
1112       // not get marked.
1113       // For the case of arrays, which are otherwise precisely
1114       // marked, we need to dirty the entire array, not just its head.
1115       if (is_obj_array) {
1116         // The [par_]mark_range() method expects mr.end() below to
1117         // be aligned to the granularity of a bit's representation
1118         // in the heap. In the case of the MUT below, that's a
1119         // card size.
1120         MemRegion mr(start,
1121                      (HeapWord*)round_to((intptr_t)(start + obj_size),
1122                         CardTableModRefBS::card_size /* bytes */));
1123         if (par) {
1124           _modUnionTable.par_mark_range(mr);
1125         } else {
1126           _modUnionTable.mark_range(mr);
1127         }
1128       } else {  // not an obj array; we can just mark the head
1129         if (par) {
1130           _modUnionTable.par_mark(start);
1131         } else {
1132           _modUnionTable.mark(start);
1133         }
1134       }
1135     }
1136   }
1137 }
1138 
1139 static inline size_t percent_of_space(Space* space, HeapWord* addr)
1140 {
1141   size_t delta = pointer_delta(addr, space->bottom());
1142   return (size_t)(delta * 100.0 / (space->capacity() / HeapWordSize));
1143 }
1144 
1145 void CMSCollector::icms_update_allocation_limits()
1146 {
1147   Generation* gen0 = GenCollectedHeap::heap()->get_gen(0);
1148   EdenSpace* eden = gen0->as_DefNewGeneration()->eden();
1149 
1150   const unsigned int duty_cycle = stats().icms_update_duty_cycle();
1151   if (CMSTraceIncrementalPacing) {
1152     stats().print();
1153   }
1154 
1155   assert(duty_cycle <= 100, "invalid duty cycle");
1156   if (duty_cycle != 0) {
1157     // The duty_cycle is a percentage between 0 and 100; convert to words and
1158     // then compute the offset from the endpoints of the space.
1159     size_t free_words = eden->free() / HeapWordSize;
1160     double free_words_dbl = (double)free_words;
1161     size_t duty_cycle_words = (size_t)(free_words_dbl * duty_cycle / 100.0);
1162     size_t offset_words = (free_words - duty_cycle_words) / 2;
1163 
1164     _icms_start_limit = eden->top() + offset_words;
1165     _icms_stop_limit = eden->end() - offset_words;
1166 
1167     // The limits may be adjusted (shifted to the right) by
1168     // CMSIncrementalOffset, to allow the application more mutator time after a
1169     // young gen gc (when all mutators were stopped) and before CMS starts and
1170     // takes away one or more cpus.
1171     if (CMSIncrementalOffset != 0) {
1172       double adjustment_dbl = free_words_dbl * CMSIncrementalOffset / 100.0;
1173       size_t adjustment = (size_t)adjustment_dbl;
1174       HeapWord* tmp_stop = _icms_stop_limit + adjustment;
1175       if (tmp_stop > _icms_stop_limit && tmp_stop < eden->end()) {
1176         _icms_start_limit += adjustment;
1177         _icms_stop_limit = tmp_stop;
1178       }
1179     }
1180   }
1181   if (duty_cycle == 0 || (_icms_start_limit == _icms_stop_limit)) {
1182     _icms_start_limit = _icms_stop_limit = eden->end();
1183   }
1184 
1185   // Install the new start limit.
1186   eden->set_soft_end(_icms_start_limit);
1187 
1188   if (CMSTraceIncrementalMode) {
1189     gclog_or_tty->print(" icms alloc limits:  "
1190                            PTR_FORMAT "," PTR_FORMAT
1191                            " (" SIZE_FORMAT "%%," SIZE_FORMAT "%%) ",
1192                            _icms_start_limit, _icms_stop_limit,
1193                            percent_of_space(eden, _icms_start_limit),
1194                            percent_of_space(eden, _icms_stop_limit));
1195     if (Verbose) {
1196       gclog_or_tty->print("eden:  ");
1197       eden->print_on(gclog_or_tty);
1198     }
1199   }
1200 }
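     // Illustrative example of the limits computed above: with duty_cycle = 20 and
     // F free words in eden, duty_cycle_words = 0.2 * F and offset_words = 0.4 * F,
     // so _icms_start_limit = top() + 0.4 * F and _icms_stop_limit = end() - 0.4 * F;
     // the window between the two limits covers the middle 20% of the remaining
     // eden space, which is where incremental CMS work is allowed to run (before
     // any CMSIncrementalOffset shift is applied).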
1201 
1202 // Any changes here should try to maintain the invariant
1203 // that if this method is called with _icms_start_limit
1204 // and _icms_stop_limit both NULL, then it should return NULL
1205 // and not notify the icms thread.
1206 HeapWord*
1207 CMSCollector::allocation_limit_reached(Space* space, HeapWord* top,
1208                                        size_t word_size)
1209 {
1210   // A start_limit equal to end() means the duty cycle is 0, so treat that as a
1211   // nop.
1212   if (CMSIncrementalMode && _icms_start_limit != space->end()) {
1213     if (top <= _icms_start_limit) {
1214       if (CMSTraceIncrementalMode) {
1215         space->print_on(gclog_or_tty);
1216         gclog_or_tty->stamp();
1217         gclog_or_tty->print_cr(" start limit top=" PTR_FORMAT
1218                                ", new limit=" PTR_FORMAT
1219                                " (" SIZE_FORMAT "%%)",
1220                                top, _icms_stop_limit,
1221                                percent_of_space(space, _icms_stop_limit));
1222       }
1223       ConcurrentMarkSweepThread::start_icms();
1224       assert(top < _icms_stop_limit, "Tautology");
1225       if (word_size < pointer_delta(_icms_stop_limit, top)) {
1226         return _icms_stop_limit;
1227       }
1228 
1229       // The allocation will cross both the _start and _stop limits, so do the
1230       // stop notification also and return end().
1231       if (CMSTraceIncrementalMode) {
1232         space->print_on(gclog_or_tty);
1233         gclog_or_tty->stamp();
1234         gclog_or_tty->print_cr(" +stop limit top=" PTR_FORMAT
1235                                ", new limit=" PTR_FORMAT
1236                                " (" SIZE_FORMAT "%%)",
1237                                top, space->end(),
1238                                percent_of_space(space, space->end()));
1239       }
1240       ConcurrentMarkSweepThread::stop_icms();
1241       return space->end();
1242     }
1243 
1244     if (top <= _icms_stop_limit) {
1245       if (CMSTraceIncrementalMode) {
1246         space->print_on(gclog_or_tty);
1247         gclog_or_tty->stamp();
1248         gclog_or_tty->print_cr(" stop limit top=" PTR_FORMAT
1249                                ", new limit=" PTR_FORMAT
1250                                " (" SIZE_FORMAT "%%)",
1251                                top, space->end(),
1252                                percent_of_space(space, space->end()));
1253       }
1254       ConcurrentMarkSweepThread::stop_icms();
1255       return space->end();
1256     }
1257 
1258     if (CMSTraceIncrementalMode) {
1259       space->print_on(gclog_or_tty);
1260       gclog_or_tty->stamp();
1261       gclog_or_tty->print_cr(" end limit top=" PTR_FORMAT
1262                              ", new limit=" PTR_FORMAT,
1263                              top, NULL);
1264     }
1265   }
1266 
1267   return NULL;
1268 }
1269 
1270 oop ConcurrentMarkSweepGeneration::promote(oop obj, size_t obj_size) {
1271   assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
1272   // allocate, copy and if necessary update promoinfo --
1273   // delegate to underlying space.
1274   assert_lock_strong(freelistLock());
1275 
1276 #ifndef PRODUCT
1277   if (Universe::heap()->promotion_should_fail()) {
1278     return NULL;
1279   }
1280 #endif  // #ifndef PRODUCT
1281 
1282   oop res = _cmsSpace->promote(obj, obj_size);
1283   if (res == NULL) {
1284     // expand and retry
1285     size_t s = _cmsSpace->expansionSpaceRequired(obj_size);  // HeapWords
1286     expand(s*HeapWordSize, MinHeapDeltaBytes,
1287       CMSExpansionCause::_satisfy_promotion);
1288     // Since there's currently no next generation, we don't try to promote
1289     // into a more senior generation.
1290     assert(next_gen() == NULL, "assumption, based upon which no attempt "
1291                                "is made to pass on a possibly failing "
1292                                "promotion to next generation");
1293     res = _cmsSpace->promote(obj, obj_size);
1294   }
1295   if (res != NULL) {
1296     // See comment in allocate() about when objects should
1297     // be allocated live.
1298     assert(obj->is_oop(), "Will dereference klass pointer below");
1299     collector()->promoted(false,           // Not parallel
1300                           (HeapWord*)res, obj->is_objArray(), obj_size);
1301     // promotion counters
1302     NOT_PRODUCT(
1303       _numObjectsPromoted++;
1304       _numWordsPromoted +=
1305         (int)(CompactibleFreeListSpace::adjustObjectSize(obj->size()));
1306     )
1307   }
1308   return res;
1309 }
1310 
1311 
1312 HeapWord*
1313 ConcurrentMarkSweepGeneration::allocation_limit_reached(Space* space,
1314                                              HeapWord* top,
1315                                              size_t word_sz)
1316 {
1317   return collector()->allocation_limit_reached(space, top, word_sz);
1318 }
1319 
1320 // IMPORTANT: Notes on object size recognition in CMS.
1321 // ---------------------------------------------------
1322 // A block of storage in the CMS generation is always in
1323 // one of three states. A free block (FREE), an allocated
1324 // object (OBJECT) whose size() method reports the correct size,
1325 // and an intermediate state (TRANSIENT) in which its size cannot
1326 // be accurately determined.
1327 // STATE IDENTIFICATION:   (32 bit and 64 bit w/o COOPS)
1328 // -----------------------------------------------------
1329 // FREE:      klass_word & 1 == 1; mark_word holds block size
1330 //
1331 // OBJECT:    klass_word installed; klass_word != 0 && klass_word & 1 == 0;
1332 //            obj->size() computes correct size
1333 //
1334 // TRANSIENT: klass_word == 0; size is indeterminate until we become an OBJECT
1335 //
1336 // STATE IDENTIFICATION: (64 bit+COOPS)
1337 // ------------------------------------
1338 // FREE:      mark_word & CMS_FREE_BIT == 1; mark_word & ~CMS_FREE_BIT gives block_size
1339 //
1340 // OBJECT:    klass_word installed; klass_word != 0;
1341 //            obj->size() computes correct size
1342 //
1343 // TRANSIENT: klass_word == 0; size is indeterminate until we become an OBJECT
1344 //
1345 //
1346 // STATE TRANSITION DIAGRAM
1347 //
1348 //        mut / parnew                     mut  /  parnew
1349 // FREE --------------------> TRANSIENT ---------------------> OBJECT --|
1350 //  ^                                                                   |
1351 //  |------------------------ DEAD <------------------------------------|
1352 //         sweep                            mut
1353 //
1354 // While a block is in TRANSIENT state its size cannot be determined
1355 // so readers will either need to come back later or stall until
1356 // the size can be determined. Note that for the case of direct
1357 // allocation, P-bits, when available, may be used to determine the
1358 // size of an object that may not yet have been initialized.
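//
// A minimal illustrative sketch (not part of the collector) of how a reader
// might classify a block using the 32-bit / 64-bit-without-COOPS rules above.
// The helper name cms_classify_block and the raw klass-word parameter are
// hypothetical, shown only to make the state table concrete:
//
//   enum CMSBlockState { CMS_FREE, CMS_OBJECT, CMS_TRANSIENT };
//   CMSBlockState cms_classify_block(intptr_t klass_word) {
//     if ((klass_word & 1) == 1) return CMS_FREE;    // mark word holds block size
//     if (klass_word != 0)       return CMS_OBJECT;  // obj->size() is now reliable
//     return CMS_TRANSIENT;                          // retry or stall until published
//   }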
1359 
1360 // Things to support parallel young-gen collection.
1361 oop
1362 ConcurrentMarkSweepGeneration::par_promote(int thread_num,
1363                                            oop old, markOop m,
1364                                            size_t word_sz) {
1365 #ifndef PRODUCT
1366   if (Universe::heap()->promotion_should_fail()) {
1367     return NULL;
1368   }
1369 #endif  // #ifndef PRODUCT
1370 
1371   CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
1372   PromotionInfo* promoInfo = &ps->promo;
1373   // if we are tracking promotions, then first ensure space for
1374   // promotion (including spooling space for saving header if necessary).
1375   // then allocate and copy, then track promoted info if needed.
1376   // When tracking (see PromotionInfo::track()), the mark word may
1377   // be displaced and in this case restoration of the mark word
1378   // occurs in the (oop_since_save_marks_)iterate phase.
1379   if (promoInfo->tracking() && !promoInfo->ensure_spooling_space()) {
1380     // Out of space for allocating spooling buffers;
1381     // try expanding and allocating spooling buffers.
1382     if (!expand_and_ensure_spooling_space(promoInfo)) {
1383       return NULL;
1384     }
1385   }
1386   assert(promoInfo->has_spooling_space(), "Control point invariant");
1387   const size_t alloc_sz = CompactibleFreeListSpace::adjustObjectSize(word_sz);
1388   HeapWord* obj_ptr = ps->lab.alloc(alloc_sz);
1389   if (obj_ptr == NULL) {
1390      obj_ptr = expand_and_par_lab_allocate(ps, alloc_sz);
1391      if (obj_ptr == NULL) {
1392        return NULL;
1393      }
1394   }
1395   oop obj = oop(obj_ptr);
1396   OrderAccess::storestore();
1397   assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
1398   assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
1399   // IMPORTANT: See note on object initialization for CMS above.
1400   // Otherwise, copy the object.  Here we must be careful to insert the
1401   // klass pointer last, since this marks the block as an allocated object.
1402   // Except with compressed oops it's the mark word.
1403   HeapWord* old_ptr = (HeapWord*)old;
1404   // Restore the mark word copied above.
1405   obj->set_mark(m);
1406   assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
1407   assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
1408   OrderAccess::storestore();
1409 
1410   if (UseCompressedKlassPointers) {
1411     // Copy gap missed by (aligned) header size calculation below
1412     obj->set_klass_gap(old->klass_gap());
1413   }
1414   if (word_sz > (size_t)oopDesc::header_size()) {
1415     Copy::aligned_disjoint_words(old_ptr + oopDesc::header_size(),
1416                                  obj_ptr + oopDesc::header_size(),
1417                                  word_sz - oopDesc::header_size());
1418   }
1419 
1420   // Now we can track the promoted object, if necessary.  We take care
1421   // to delay the transition from uninitialized to full object
1422   // (i.e., insertion of klass pointer) until after, so that it
1423   // atomically becomes a promoted object.
1424   if (promoInfo->tracking()) {
1425     promoInfo->track((PromotedObject*)obj, old->klass());
1426   }
1427   assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
1428   assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
1429   assert(old->is_oop(), "Will use and dereference old klass ptr below");
1430 
1431   // Finally, install the klass pointer (this should be volatile).
1432   OrderAccess::storestore();
1433   obj->set_klass(old->klass());
1434   // We should now be able to calculate the right size for this object
1435   assert(obj->is_oop() && obj->size() == (int)word_sz, "Error, incorrect size computed for promoted object");
1436 
1437   collector()->promoted(true,          // parallel
1438                         obj_ptr, old->is_objArray(), word_sz);
1439 
1440   NOT_PRODUCT(
1441     Atomic::inc_ptr(&_numObjectsPromoted);
1442     Atomic::add_ptr(alloc_sz, &_numWordsPromoted);
1443   )
1444 
1445   return obj;
1446 }
1447 
1448 void
1449 ConcurrentMarkSweepGeneration::
1450 par_promote_alloc_undo(int thread_num,
1451                        HeapWord* obj, size_t word_sz) {
1452   // CMS does not support promotion undo.
1453   ShouldNotReachHere();
1454 }
1455 
1456 void
1457 ConcurrentMarkSweepGeneration::
1458 par_promote_alloc_done(int thread_num) {
1459   CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
1460   ps->lab.retire(thread_num);
1461 }
1462 
1463 void
1464 ConcurrentMarkSweepGeneration::
1465 par_oop_since_save_marks_iterate_done(int thread_num) {
1466   CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
1467   ParScanWithoutBarrierClosure* dummy_cl = NULL;
1468   ps->promo.promoted_oops_iterate_nv(dummy_cl);
1469 }
1470 
1471 bool ConcurrentMarkSweepGeneration::should_collect(bool   full,
1472                                                    size_t size,
1473                                                    bool   tlab)
1474 {
1475   // We allow a STW collection only if a full
1476   // collection was requested.
1477   return full || should_allocate(size, tlab); // FIX ME !!!
1478   // This and promotion failure handling are connected at the
1479   // hip and should be fixed by untying them.
1480 }
1481 
1482 bool CMSCollector::shouldConcurrentCollect() {
1483   if (_full_gc_requested) {
1484     if (Verbose && PrintGCDetails) {
1485       gclog_or_tty->print_cr("CMSCollector: collect because of explicit "
1486                              "gc request (or gc_locker)");
1487     }
1488     return true;
1489   }
1490 
1491   // For debugging purposes, change the type of collection.
1492   // If the rotation is not on the concurrent collection
1493   // type, don't start a concurrent collection.
1494   NOT_PRODUCT(
1495     if (RotateCMSCollectionTypes &&
1496         (_cmsGen->debug_collection_type() !=
1497           ConcurrentMarkSweepGeneration::Concurrent_collection_type)) {
1498       assert(_cmsGen->debug_collection_type() !=
1499         ConcurrentMarkSweepGeneration::Unknown_collection_type,
1500         "Bad cms collection type");
1501       return false;
1502     }
1503   )
1504 
1505   FreelistLocker x(this);
1506   // ------------------------------------------------------------------
1507   // Print out lots of information which affects the initiation of
1508   // a collection.
1509   if (PrintCMSInitiationStatistics && stats().valid()) {
1510     gclog_or_tty->print("CMSCollector shouldConcurrentCollect: ");
1511     gclog_or_tty->stamp();
1512     gclog_or_tty->print_cr("");
1513     stats().print_on(gclog_or_tty);
1514     gclog_or_tty->print_cr("time_until_cms_gen_full %3.7f",
1515       stats().time_until_cms_gen_full());
1516     gclog_or_tty->print_cr("free="SIZE_FORMAT, _cmsGen->free());
1517     gclog_or_tty->print_cr("contiguous_available="SIZE_FORMAT,
1518                            _cmsGen->contiguous_available());
1519     gclog_or_tty->print_cr("promotion_rate=%g", stats().promotion_rate());
1520     gclog_or_tty->print_cr("cms_allocation_rate=%g", stats().cms_allocation_rate());
1521     gclog_or_tty->print_cr("occupancy=%3.7f", _cmsGen->occupancy());
1522     gclog_or_tty->print_cr("initiatingOccupancy=%3.7f", _cmsGen->initiating_occupancy());
1523     gclog_or_tty->print_cr("metadata initialized %d",
1524       MetaspaceGC::should_concurrent_collect());
1525   }
1526   // ------------------------------------------------------------------
1527 
1528   // If the estimated time to complete a cms collection (cms_duration())
1529   // is less than the estimated time remaining until the cms generation
1530   // is full, start a collection.
1531   if (!UseCMSInitiatingOccupancyOnly) {
1532     if (stats().valid()) {
1533       if (stats().time_until_cms_start() == 0.0) {
1534         return true;
1535       }
1536     } else {
1537       // We want to conservatively collect somewhat early in order
1538       // to try and "bootstrap" our CMS/promotion statistics;
1539       // this branch will not fire after the first successful CMS
1540       // collection because the stats should then be valid.
1541       if (_cmsGen->occupancy() >= _bootstrap_occupancy) {
1542         if (Verbose && PrintGCDetails) {
1543           gclog_or_tty->print_cr(
1544             " CMSCollector: collect for bootstrapping statistics:"
1545             " occupancy = %f, boot occupancy = %f", _cmsGen->occupancy(),
1546             _bootstrap_occupancy);
1547         }
1548         return true;
1549       }
1550     }
1551   }
1552 
1553   // Otherwise, we start a collection cycle if
1554   // the old gen wants a collection cycle started. It may use
1555   // an appropriate criterion for making this decision.
1556   // XXX We need to make sure that the gen expansion
1557   // criterion dovetails well with this. XXX NEED TO FIX THIS
1558   if (_cmsGen->should_concurrent_collect()) {
1559     if (Verbose && PrintGCDetails) {
1560       gclog_or_tty->print_cr("CMS old gen initiated");
1561     }
1562     return true;
1563   }
1564 
1565   // We start a collection if we believe an incremental collection may fail;
1566   // this is not likely to be productive in practice because it's probably too
1567   // late anyway.
1568   GenCollectedHeap* gch = GenCollectedHeap::heap();
1569   assert(gch->collector_policy()->is_two_generation_policy(),
1570          "You may want to check the correctness of the following");
1571   if (gch->incremental_collection_will_fail(true /* consult_young */)) {
1572     if (Verbose && PrintGCDetails) {
1573       gclog_or_tty->print("CMSCollector: collect because incremental collection will fail ");
1574     }
1575     return true;
1576   }
1577 
1578   if (MetaspaceGC::should_concurrent_collect()) {
1579     if (Verbose && PrintGCDetails) {
1580       gclog_or_tty->print("CMSCollector: collect for metadata allocation ");
1581     }
1582     return true;
1583   }
1584 
1585   return false;
1586 }
1587 
1588 void CMSCollector::set_did_compact(bool v) { _cmsGen->set_did_compact(v); }
1589 
1590 // Clear _expansion_cause fields of constituent generations
1591 void CMSCollector::clear_expansion_cause() {
1592   _cmsGen->clear_expansion_cause();
1593 }
1594 
1595 // We should be conservative in starting a collection cycle.  Starting
1596 // too eagerly runs the risk of collecting too often in the
1597 // extreme.  Collecting too rarely falls back on full collections,
1598 // which works, even if not optimum in terms of concurrent work.
1599 // As a workaround for collecting too eagerly, use the flag
1600 // UseCMSInitiatingOccupancyOnly.  This also has the advantage of
1601 // giving the user an easily understandable way of controlling the
1602 // collections.
1603 // We want to start a new collection cycle if any of the following
1604 // conditions hold:
1605 // . our current occupancy exceeds the configured initiating occupancy
1606 //   for this generation, or
1607 // . we recently needed to expand this space and have not, since that
1608 //   expansion, done a collection of this generation, or
1609 // . the underlying space believes that it may be a good idea to initiate
1610 //   a concurrent collection (this may be based on criteria such as the
1611 //   following: the space uses linear allocation and linear allocation is
1612 //   going to fail, or there is believed to be excessive fragmentation in
1613 //   the generation, etc... or ...
1614 // [.(currently done by CMSCollector::shouldConcurrentCollect() only for
1615 //   the case of the old generation; see CR 6543076):
1616 //   we may be approaching a point at which allocation requests may fail because
1617 //   we will be out of sufficient free space given allocation rate estimates.]
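//
// For illustration (the numbers are hypothetical): with an initiating
// occupancy of 0.68 and a current occupancy() of 0.72, the first criterion
// above fires and should_concurrent_collect() below returns true without
// consulting the expansion-cause or cmsSpace criteria.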
1618 bool ConcurrentMarkSweepGeneration::should_concurrent_collect() const {
1619 
1620   assert_lock_strong(freelistLock());
1621   if (occupancy() > initiating_occupancy()) {
1622     if (PrintGCDetails && Verbose) {
1623       gclog_or_tty->print(" %s: collect because of occupancy %f / %f  ",
1624         short_name(), occupancy(), initiating_occupancy());
1625     }
1626     return true;
1627   }
1628   if (UseCMSInitiatingOccupancyOnly) {
1629     return false;
1630   }
1631   if (expansion_cause() == CMSExpansionCause::_satisfy_allocation) {
1632     if (PrintGCDetails && Verbose) {
1633       gclog_or_tty->print(" %s: collect because expanded for allocation ",
1634         short_name());
1635     }
1636     return true;
1637   }
1638   if (_cmsSpace->should_concurrent_collect()) {
1639     if (PrintGCDetails && Verbose) {
1640       gclog_or_tty->print(" %s: collect because cmsSpace says so ",
1641         short_name());
1642     }
1643     return true;
1644   }
1645   return false;
1646 }
1647 
1648 void ConcurrentMarkSweepGeneration::collect(bool   full,
1649                                             bool   clear_all_soft_refs,
1650                                             size_t size,
1651                                             bool   tlab)
1652 {
1653   collector()->collect(full, clear_all_soft_refs, size, tlab);
1654 }
1655 
1656 void CMSCollector::collect(bool   full,
1657                            bool   clear_all_soft_refs,
1658                            size_t size,
1659                            bool   tlab)
1660 {
1661   if (!UseCMSCollectionPassing && _collectorState > Idling) {
1662     // For debugging purposes skip the collection if the state
1663     // is not currently idle
1664     if (TraceCMSState) {
1665       gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " skipped full:%d CMS state %d",
1666         Thread::current(), full, _collectorState);
1667     }
1668     return;
1669   }
1670 
1671   // The following "if" branch is present for defensive reasons.
1672   // In the current uses of this interface, it can be replaced with:
1673   // assert(!GC_locker.is_active(), "Can't be called otherwise");
1674   // But I am not placing that assert here to allow future
1675   // generality in invoking this interface.
1676   if (GC_locker::is_active()) {
1677     // A consistency test for GC_locker
1678     assert(GC_locker::needs_gc(), "Should have been set already");
1679     // Skip this foreground collection, instead
1680     // expanding the heap if necessary.
1681     // Need the free list locks for the call to free() in compute_new_size()
1682     compute_new_size();
1683     return;
1684   }
1685   acquire_control_and_collect(full, clear_all_soft_refs);
1686   _full_gcs_since_conc_gc++;
1687 }
1688 
1689 void CMSCollector::request_full_gc(unsigned int full_gc_count, GCCause::Cause cause) {
1690   GenCollectedHeap* gch = GenCollectedHeap::heap();
1691   unsigned int gc_count = gch->total_full_collections();
1692   if (gc_count == full_gc_count) {
1693     MutexLockerEx y(CGC_lock, Mutex::_no_safepoint_check_flag);
1694     _full_gc_requested = true;
1695     _full_gc_cause = cause;
1696     CGC_lock->notify();   // nudge CMS thread
1697   } else {
1698     assert(gc_count > full_gc_count, "Error: causal loop");
1699   }
1700 }
1701 
1702 bool CMSCollector::is_external_interruption() {
1703   GCCause::Cause cause = GenCollectedHeap::heap()->gc_cause();
1704   return GCCause::is_user_requested_gc(cause) ||
1705          GCCause::is_serviceability_requested_gc(cause);
1706 }
1707 
1708 void CMSCollector::report_concurrent_mode_interruption() {
1709   if (is_external_interruption()) {
1710     if (PrintGCDetails) {
1711       gclog_or_tty->print(" (concurrent mode interrupted)");
1712     }
1713   } else {
1714     if (PrintGCDetails) {
1715       gclog_or_tty->print(" (concurrent mode failure)");
1716     }
1717     _gc_tracer_cm->report_concurrent_mode_failure();
1718   }
1719 }
1720 
1721 
1722 // The foreground and background collectors need to coordinate in order
1723 // to make sure that they do not mutually interfere with CMS collections.
1724 // When a background collection is active,
1725 // the foreground collector may need to take over (preempt) and
1726 // synchronously complete an ongoing collection. Depending on the
1727 // frequency of the background collections and the heap usage
1728 // of the application, this preemption can be seldom or frequent.
1729 // There are only certain
1730 // points in the background collection at which the "collection-baton"
1731 // can be passed to the foreground collector.
1732 //
1733 // The foreground collector will wait for the baton before
1734 // starting any part of the collection.  The foreground collector
1735 // will only wait at one location.
1736 //
1737 // The background collector will yield the baton before starting a new
1738 // phase of the collection (e.g., before initial marking, marking from roots,
1739 // precleaning, final re-mark, sweep etc.)  This is normally done at the head
1740 // of the loop which switches the phases. The background collector does some
1741 // of the phases (initial mark, final re-mark) with the world stopped.
1742 // Because of locking involved in stopping the world,
1743 // the foreground collector should not block waiting for the background
1744 // collector when it is doing a stop-the-world phase.  The background
1745 // collector will yield the baton at an additional point just before
1746 // it enters a stop-the-world phase.  Once the world is stopped, the
1747 // background collector checks the phase of the collection.  If the
1748 // phase has not changed, it proceeds with the collection.  If the
1749 // phase has changed, it skips that phase of the collection.  See
1750 // the comments on the use of the Heap_lock in collect_in_background().
1751 //
1752 // Variable used in baton passing.
1753 //   _foregroundGCIsActive - Set to true by the foreground collector when
1754 //      it wants the baton.  The foreground clears it when it has finished
1755 //      the collection.
1756 //   _foregroundGCShouldWait - Set to true by the background collector
1757 //        when it is running.  The foreground collector waits while
1758 //      _foregroundGCShouldWait is true.
1759 //  CGC_lock - monitor used to protect access to the above variables
1760 //      and to notify the foreground and background collectors.
1761 //  _collectorState - current state of the CMS collection.
1762 //
1763 // The foreground collector
1764 //   acquires the CGC_lock
1765 //   sets _foregroundGCIsActive
1766 //   waits on the CGC_lock for _foregroundGCShouldWait to be false
1767 //     various locks acquired in preparation for the collection
1768 //     are released so as not to block the background collector
1769 //     that is in the midst of a collection
1770 //   proceeds with the collection
1771 //   clears _foregroundGCIsActive
1772 //   returns
1773 //
1774 // The background collector in a loop iterating on the phases of the
1775 //      collection
1776 //   acquires the CGC_lock
1777 //   sets _foregroundGCShouldWait
1778 //   if _foregroundGCIsActive is set
1779 //     clears _foregroundGCShouldWait, notifies _CGC_lock
1780 //     waits on _CGC_lock for _foregroundGCIsActive to become false
1781 //     and exits the loop.
1782 //   otherwise
1783 //     proceed with that phase of the collection
1784 //     if the phase is a stop-the-world phase,
1785 //       yield the baton once more just before enqueueing
1786 //       the stop-world CMS operation (executed by the VM thread).
1787 //   returns after all phases of the collection are done
1788 //
1789 
1790 void CMSCollector::acquire_control_and_collect(bool full,
1791         bool clear_all_soft_refs) {
1792   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
1793   assert(!Thread::current()->is_ConcurrentGC_thread(),
1794          "shouldn't try to acquire control from self!");
1795 
1796   // Start the protocol for acquiring control of the
1797   // collection from the background collector (aka CMS thread).
1798   assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
1799          "VM thread should have CMS token");
1800   // Remember the possibly interrupted state of an ongoing
1801   // concurrent collection
1802   CollectorState first_state = _collectorState;
1803 
1804   // Signal to a possibly ongoing concurrent collection that
1805   // we want to do a foreground collection.
1806   _foregroundGCIsActive = true;
1807 
1808   // Disable incremental mode during a foreground collection.
1809   ICMSDisabler icms_disabler;
1810 
1811   // release locks and wait for a notify from the background collector
1812   // releasing the locks in only necessary for phases which
1813   // releasing the locks is only necessary for phases which
1814   assert_lock_strong(bitMapLock());
1815   // We need to lock the Free list lock for the space that we are
1816   // currently collecting.
1817   assert(haveFreelistLocks(), "Must be holding free list locks");
1818   bitMapLock()->unlock();
1819   releaseFreelistLocks();
1820   {
1821     MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
1822     if (_foregroundGCShouldWait) {
1823       // We are going to be waiting for action for the CMS thread;
1824       // it had better not be gone (for instance at shutdown)!
1825       assert(ConcurrentMarkSweepThread::cmst() != NULL,
1826              "CMS thread must be running");
1827       // Wait here until the background collector gives us the go-ahead
1828       ConcurrentMarkSweepThread::clear_CMS_flag(
1829         ConcurrentMarkSweepThread::CMS_vm_has_token);  // release token
1830       // Get a possibly blocked CMS thread going:
1831       //   Note that we set _foregroundGCIsActive true above,
1832       //   without protection of the CGC_lock.
1833       CGC_lock->notify();
1834       assert(!ConcurrentMarkSweepThread::vm_thread_wants_cms_token(),
1835              "Possible deadlock");
1836       while (_foregroundGCShouldWait) {
1837         // wait for notification
1838         CGC_lock->wait(Mutex::_no_safepoint_check_flag);
1839         // Possibility of delay/starvation here, since CMS token does
1840         // not know to give priority to the VM thread? Actually, I think
1841         // there wouldn't be any delay/starvation, but the proof of
1842         // that "fact" (?) appears non-trivial. XXX 20011219YSR
1843       }
1844       ConcurrentMarkSweepThread::set_CMS_flag(
1845         ConcurrentMarkSweepThread::CMS_vm_has_token);
1846     }
1847   }
1848   // The CMS_token is already held.  Get back the other locks.
1849   assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
1850          "VM thread should have CMS token");
1851   getFreelistLocks();
1852   bitMapLock()->lock_without_safepoint_check();
1853   if (TraceCMSState) {
1854     gclog_or_tty->print_cr("CMS foreground collector has asked for control "
1855       INTPTR_FORMAT " with first state %d", Thread::current(), first_state);
1856     gclog_or_tty->print_cr("    gets control with state %d", _collectorState);
1857   }
1858 
1859   // Check if we need to do a compaction, or if not, whether
1860   // we need to start the mark-sweep from scratch.
1861   bool should_compact    = false;
1862   bool should_start_over = false;
1863   decide_foreground_collection_type(clear_all_soft_refs,
1864     &should_compact, &should_start_over);
1865 
1866   NOT_PRODUCT(
1867     if (RotateCMSCollectionTypes) {
1868       if (_cmsGen->debug_collection_type() ==
1869           ConcurrentMarkSweepGeneration::MSC_foreground_collection_type) {
1870         should_compact = true;
1871       } else if (_cmsGen->debug_collection_type() ==
1872                  ConcurrentMarkSweepGeneration::MS_foreground_collection_type) {
1873         should_compact = false;
1874       }
1875     }
1876   )
1877 
1878   if (first_state > Idling) {
1879     report_concurrent_mode_interruption();
1880   }
1881 
1882   set_did_compact(should_compact);
1883   if (should_compact) {
1884     // If the collection is being acquired from the background
1885     // collector, there may be references on the discovered
1886     // references lists that have NULL referents (being those
1887     // that were concurrently cleared by a mutator) or
1888     // that are no longer active (having been enqueued concurrently
1889     // by the mutator).
1890     // Scrub the list of those references because Mark-Sweep-Compact
1891     // code assumes referents are not NULL and that all discovered
1892     // Reference objects are active.
1893     ref_processor()->clean_up_discovered_references();
1894 
1895     if (first_state > Idling) {
1896       save_heap_summary();
1897     }
1898 
1899     do_compaction_work(clear_all_soft_refs);
1900 
1901     // Has the GC time limit been exceeded?
1902     DefNewGeneration* young_gen = _young_gen->as_DefNewGeneration();
1903     size_t max_eden_size = young_gen->max_capacity() -
1904                            young_gen->to()->capacity() -
1905                            young_gen->from()->capacity();
1906     GenCollectedHeap* gch = GenCollectedHeap::heap();
1907     GCCause::Cause gc_cause = gch->gc_cause();
1908     size_policy()->check_gc_overhead_limit(_young_gen->used(),
1909                                            young_gen->eden()->used(),
1910                                            _cmsGen->max_capacity(),
1911                                            max_eden_size,
1912                                            full,
1913                                            gc_cause,
1914                                            gch->collector_policy());
1915   } else {
1916     do_mark_sweep_work(clear_all_soft_refs, first_state,
1917       should_start_over);
1918   }
1919   // Reset the expansion cause, now that we just completed
1920   // a collection cycle.
1921   clear_expansion_cause();
1922   _foregroundGCIsActive = false;
1923   return;
1924 }
1925 
1926 // Resize the tenured generation
1927 // after obtaining the free list locks for the
1928 // two generations.
1929 void CMSCollector::compute_new_size() {
1930   assert_locked_or_safepoint(Heap_lock);
1931   FreelistLocker z(this);
1932   MetaspaceGC::compute_new_size();
1933   _cmsGen->compute_new_size_free_list();
1934 }
1935 
1936 // A work method used by foreground collection to determine
1937 // what type of collection (compacting or not, continuing or fresh)
1938 // it should do.
1939 // NOTE: the intent is to make UseCMSCompactAtFullCollection
1940 // and CMSCompactWhenClearAllSoftRefs the default in the future
1941 // and do away with the flags after a suitable period.
1942 void CMSCollector::decide_foreground_collection_type(
1943   bool clear_all_soft_refs, bool* should_compact,
1944   bool* should_start_over) {
1945   // Normally, we'll compact only if the UseCMSCompactAtFullCollection
1946   // flag is set, and we have either requested a System.gc() or
1947   // the number of full gc's since the last concurrent cycle
1948   // has exceeded the threshold set by CMSFullGCsBeforeCompaction,
1949   // or if an incremental collection has failed
1950   GenCollectedHeap* gch = GenCollectedHeap::heap();
1951   assert(gch->collector_policy()->is_two_generation_policy(),
1952          "You may want to check the correctness of the following");
1953   // Inform cms gen if this was due to partial collection failing.
1954   // The CMS gen may use this fact to determine its expansion policy.
1955   if (gch->incremental_collection_will_fail(false /* don't consult_young */)) {
1956     assert(!_cmsGen->incremental_collection_failed(),
1957            "Should have been noticed, reacted to and cleared");
1958     _cmsGen->set_incremental_collection_failed();
1959   }
1960   *should_compact =
1961     UseCMSCompactAtFullCollection &&
1962     ((_full_gcs_since_conc_gc >= CMSFullGCsBeforeCompaction) ||
1963      GCCause::is_user_requested_gc(gch->gc_cause()) ||
1964      gch->incremental_collection_will_fail(true /* consult_young */));
1965   *should_start_over = false;
1966   if (clear_all_soft_refs && !*should_compact) {
1967     // We are about to do a last ditch collection attempt
1968     // so it would normally make sense to do a compaction
1969     // to reclaim as much space as possible.
1970     if (CMSCompactWhenClearAllSoftRefs) {
1971       // Default: The rationale is that in this case either
1972       // we are past the final marking phase, in which case
1973       // we'd have to start over, or so little has been done
1974       // that there's little point in saving that work. Compaction
1975       // appears to be the sensible choice in either case.
1976       *should_compact = true;
1977     } else {
1978       // We have been asked to clear all soft refs, but not to
1979       // compact. Make sure that we aren't past the final checkpoint
1980       // phase, for that is where we process soft refs. If we are already
1981       // past that phase, we'll need to redo the refs discovery phase and
1982       // if necessary clear soft refs that weren't previously
1983       // cleared. We do so by remembering the phase in which
1984       // we came in, and if we are past the refs processing
1985       // phase, we'll choose to just redo the mark-sweep
1986       // collection from scratch.
1987       if (_collectorState > FinalMarking) {
1988         // We are past the refs processing phase;
1989         // start over and do a fresh synchronous CMS cycle
1990         _collectorState = Resetting; // skip to reset to start new cycle
1991         reset(false /* == !asynch */);
1992         *should_start_over = true;
1993       } // else we can continue a possibly ongoing current cycle
1994     }
1995   }
1996 }
1997 
1998 // A work method used by the foreground collector to do
1999 // a mark-sweep-compact.
2000 void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
2001   GenCollectedHeap* gch = GenCollectedHeap::heap();
2002 
2003   STWGCTimer* gc_timer = GenMarkSweep::gc_timer();
2004   gc_timer->register_gc_start(os::elapsed_counter());
2005 
2006   SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer();
2007   gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start());
2008 
2009   GCTraceTime t("CMS:MSC ", PrintGCDetails && Verbose, true, NULL);
2010   if (PrintGC && Verbose && !(GCCause::is_user_requested_gc(gch->gc_cause()))) {
2011     gclog_or_tty->print_cr("Compact ConcurrentMarkSweepGeneration after %d "
2012       "collections passed to foreground collector", _full_gcs_since_conc_gc);
2013   }
2014 
2015   // Sample collection interval time and reset for collection pause.
2016   if (UseAdaptiveSizePolicy) {
2017     size_policy()->msc_collection_begin();
2018   }
2019 
2020   // Temporarily widen the span of the weak reference processing to
2021   // the entire heap.
2022   MemRegion new_span(GenCollectedHeap::heap()->reserved_region());
2023   ReferenceProcessorSpanMutator rp_mut_span(ref_processor(), new_span);
2024   // Temporarily, clear the "is_alive_non_header" field of the
2025   // reference processor.
2026   ReferenceProcessorIsAliveMutator rp_mut_closure(ref_processor(), NULL);
2027   // Temporarily make reference _processing_ single threaded (non-MT).
2028   ReferenceProcessorMTProcMutator rp_mut_mt_processing(ref_processor(), false);
2029   // Temporarily make refs discovery atomic
2030   ReferenceProcessorAtomicMutator rp_mut_atomic(ref_processor(), true);
2031   // Temporarily make reference _discovery_ single threaded (non-MT)
2032   ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false);
2033 
2034   ref_processor()->set_enqueuing_is_done(false);
2035   ref_processor()->enable_discovery(false /*verify_disabled*/, false /*check_no_refs*/);
2036   ref_processor()->setup_policy(clear_all_soft_refs);
2037   // If an asynchronous collection finishes, the _modUnionTable is
2038   // all clear.  If we are assuming the collection from an asynchronous
2039   // collection, clear the _modUnionTable.
2040   assert(_collectorState != Idling || _modUnionTable.isAllClear(),
2041     "_modUnionTable should be clear if the baton was not passed");
2042   _modUnionTable.clear_all();
2043   assert(_collectorState != Idling || _ct->klass_rem_set()->mod_union_is_clear(),
2044     "mod union for klasses should be clear if the baton was not passed");
2045   _ct->klass_rem_set()->clear_mod_union();
2046 
2047   // We must adjust the allocation statistics being maintained
2048   // in the free list space. We do so by reading and clearing
2049   // the sweep timer and updating the block flux rate estimates below.
2050   assert(!_intra_sweep_timer.is_active(), "_intra_sweep_timer should be inactive");
2051   if (_inter_sweep_timer.is_active()) {
2052     _inter_sweep_timer.stop();
2053     // Note that we do not use this sample to update the _inter_sweep_estimate.
2054     _cmsGen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()),
2055                                             _inter_sweep_estimate.padded_average(),
2056                                             _intra_sweep_estimate.padded_average());
2057   }
2058 
2059   GenMarkSweep::invoke_at_safepoint(_cmsGen->level(),
2060     ref_processor(), clear_all_soft_refs);
2061   #ifdef ASSERT
2062     CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();
2063     size_t free_size = cms_space->free();
2064     assert(free_size ==
2065            pointer_delta(cms_space->end(), cms_space->compaction_top())
2066            * HeapWordSize,
2067       "All the free space should be compacted into one chunk at top");
2068     assert(cms_space->dictionary()->total_chunk_size(
2069                                       debug_only(cms_space->freelistLock())) == 0 ||
2070            cms_space->totalSizeInIndexedFreeLists() == 0,
2071       "All the free space should be in a single chunk");
2072     size_t num = cms_space->totalCount();
2073     assert((free_size == 0 && num == 0) ||
2074            (free_size > 0  && (num == 1 || num == 2)),
2075          "There should be at most 2 free chunks after compaction");
2076   #endif // ASSERT
2077   _collectorState = Resetting;
2078   assert(_restart_addr == NULL,
2079          "Should have been NULL'd before baton was passed");
2080   reset(false /* == !asynch */);
2081   _cmsGen->reset_after_compaction();
2082   _concurrent_cycles_since_last_unload = 0;
2083 
2084   // Clear any data recorded in the PLAB chunk arrays.
2085   if (_survivor_plab_array != NULL) {
2086     reset_survivor_plab_arrays();
2087   }
2088 
2089   // Adjust the per-size allocation stats for the next epoch.
2090   _cmsGen->cmsSpace()->endSweepFLCensus(sweep_count() /* fake */);
2091   // Restart the "inter sweep timer" for the next epoch.
2092   _inter_sweep_timer.reset();
2093   _inter_sweep_timer.start();
2094 
2095   // Sample collection pause time and reset for collection interval.
2096   if (UseAdaptiveSizePolicy) {
2097     size_policy()->msc_collection_end(gch->gc_cause());
2098   }
2099 
2100   gc_timer->register_gc_end(os::elapsed_counter());
2101 
2102   gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
2103 
2104   // For a mark-sweep-compact, compute_new_size() will be called
2105   // in the heap's do_collection() method.
2106 }
2107 
2108 // A work method used by the foreground collector to do
2109 // a mark-sweep, after taking over from a possibly on-going
2110 // concurrent mark-sweep collection.
2111 void CMSCollector::do_mark_sweep_work(bool clear_all_soft_refs,
2112   CollectorState first_state, bool should_start_over) {
2113   if (PrintGC && Verbose) {
2114     gclog_or_tty->print_cr("Pass concurrent collection to foreground "
2115       "collector with count %d",
2116       _full_gcs_since_conc_gc);
2117   }
2118   switch (_collectorState) {
2119     case Idling:
2120       if (first_state == Idling || should_start_over) {
2121         // The background GC was not active, or should be
2122         // restarted from scratch; start the cycle.
2123         _collectorState = InitialMarking;
2124       }
2125       // If first_state was not Idling, then a background GC
2126       // was in progress and has now finished.  No need to do it
2127       // again.  Leave the state as Idling.
2128       break;
2129     case Precleaning:
2130       // In the foreground case don't do the precleaning since
2131       // it is not done concurrently and there is extra work
2132       // required.
2133       _collectorState = FinalMarking;
2134   }
2135   collect_in_foreground(clear_all_soft_refs, GenCollectedHeap::heap()->gc_cause());
2136 
2137   // For a mark-sweep, compute_new_size() will be called
2138   // in the heap's do_collection() method.
2139 }
2140 
2141 
2142 void CMSCollector::print_eden_and_survivor_chunk_arrays() {
2143   DefNewGeneration* dng = _young_gen->as_DefNewGeneration();
2144   EdenSpace* eden_space = dng->eden();
2145   ContiguousSpace* from_space = dng->from();
2146   ContiguousSpace* to_space   = dng->to();
2147   // Eden
2148   if (_eden_chunk_array != NULL) {
2149     gclog_or_tty->print_cr("eden " PTR_FORMAT "-" PTR_FORMAT "-" PTR_FORMAT "(" SIZE_FORMAT ")",
2150                            eden_space->bottom(), eden_space->top(),
2151                            eden_space->end(), eden_space->capacity());
2152     gclog_or_tty->print_cr("_eden_chunk_index=" SIZE_FORMAT ", "
2153                            "_eden_chunk_capacity=" SIZE_FORMAT,
2154                            _eden_chunk_index, _eden_chunk_capacity);
2155     for (size_t i = 0; i < _eden_chunk_index; i++) {
2156       gclog_or_tty->print_cr("_eden_chunk_array[" SIZE_FORMAT "]=" PTR_FORMAT,
2157                              i, _eden_chunk_array[i]);
2158     }
2159   }
2160   // Survivor
2161   if (_survivor_chunk_array != NULL) {
2162     gclog_or_tty->print_cr("survivor " PTR_FORMAT "-" PTR_FORMAT "-" PTR_FORMAT "(" SIZE_FORMAT ")",
2163                            from_space->bottom(), from_space->top(),
2164                            from_space->end(), from_space->capacity());
2165     gclog_or_tty->print_cr("_survivor_chunk_index=" SIZE_FORMAT ", "
2166                            "_survivor_chunk_capacity=" SIZE_FORMAT,
2167                            _survivor_chunk_index, _survivor_chunk_capacity);
2168     for (size_t i = 0; i < _survivor_chunk_index; i++) {
2169       gclog_or_tty->print_cr("_survivor_chunk_array[" SIZE_FORMAT "]=" PTR_FORMAT,
2170                              i, _survivor_chunk_array[i]);
2171     }
2172   }
2173 }
2174 
2175 void CMSCollector::getFreelistLocks() const {
2176   // Get locks for all free lists in all generations that this
2177   // collector is responsible for
2178   _cmsGen->freelistLock()->lock_without_safepoint_check();
2179 }
2180 
2181 void CMSCollector::releaseFreelistLocks() const {
2182   // Release locks for all free lists in all generations that this
2183   // collector is responsible for
2184   _cmsGen->freelistLock()->unlock();
2185 }
2186 
2187 bool CMSCollector::haveFreelistLocks() const {
2188   // Check locks for all free lists in all generations that this
2189   // collector is responsible for
2190   assert_lock_strong(_cmsGen->freelistLock());
2191   PRODUCT_ONLY(ShouldNotReachHere());
2192   return true;
2193 }
2194 
2195 // A utility class that is used by the CMS collector to
2196 // temporarily "release" the foreground collector from its
2197 // usual obligation to wait for the background collector to
2198 // complete an ongoing phase before proceeding.
2199 class ReleaseForegroundGC: public StackObj {
2200  private:
2201   CMSCollector* _c;
2202  public:
2203   ReleaseForegroundGC(CMSCollector* c) : _c(c) {
2204     assert(_c->_foregroundGCShouldWait, "Else should not need to call");
2205     MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2206     // allow a potentially blocked foreground collector to proceed
2207     _c->_foregroundGCShouldWait = false;
2208     if (_c->_foregroundGCIsActive) {
2209       CGC_lock->notify();
2210     }
2211     assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
2212            "Possible deadlock");
2213   }
2214 
2215   ~ReleaseForegroundGC() {
2216     assert(!_c->_foregroundGCShouldWait, "Usage protocol violation?");
2217     MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2218     _c->_foregroundGCShouldWait = true;
2219   }
2220 };
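
// Typical (illustrative) use of ReleaseForegroundGC, as in
// collect_in_background() below: the background collector wraps a
// stop-the-world VM operation in a block scope so that a potentially blocked
// foreground collector is released for the duration of the operation, with
// the wait flag re-asserted by the destructor on exit:
//
//   {
//     ReleaseForegroundGC x(this);          // clears _foregroundGCShouldWait
//     VM_CMS_Initial_Mark initial_mark_op(this);
//     VMThread::execute(&initial_mark_op);
//   }                                       // destructor sets it back to true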
2221 
2222 // There are separate collect_in_background and collect_in_foreground because of
2223 // the different locking requirements of the background collector and the
2224 // foreground collector.  There was originally an attempt to share
2225 // one "collect" method between the background collector and the foreground
2226 // collector, but the amount of if-then-else logic required made it cleaner to
2227 // have separate methods.
2228 void CMSCollector::collect_in_background(bool clear_all_soft_refs, GCCause::Cause cause) {
2229   assert(Thread::current()->is_ConcurrentGC_thread(),
2230     "A CMS asynchronous collection is only allowed on a CMS thread.");
2231 
2232   GenCollectedHeap* gch = GenCollectedHeap::heap();
2233   {
2234     bool safepoint_check = Mutex::_no_safepoint_check_flag;
2235     MutexLockerEx hl(Heap_lock, safepoint_check);
2236     FreelistLocker fll(this);
2237     MutexLockerEx x(CGC_lock, safepoint_check);
2238     if (_foregroundGCIsActive || !UseAsyncConcMarkSweepGC) {
2239       // The foreground collector is active or we're
2240       // not using asynchronous collections.  Skip this
2241       // background collection.
2242       assert(!_foregroundGCShouldWait, "Should be clear");
2243       return;
2244     } else {
2245       assert(_collectorState == Idling, "Should be idling before start.");
2246       _collectorState = InitialMarking;
2247       register_gc_start(cause);
2248       // Reset the expansion cause, now that we are about to begin
2249       // a new cycle.
2250       clear_expansion_cause();
2251 
2252       // Clear the MetaspaceGC flag since a concurrent collection
2253       // is starting but also clear it after the collection.
2254       MetaspaceGC::set_should_concurrent_collect(false);
2255     }
2256     // Decide if we want to enable class unloading as part of the
2257     // ensuing concurrent GC cycle.
2258     update_should_unload_classes();
2259     _full_gc_requested = false;           // acks all outstanding full gc requests
2260     _full_gc_cause = GCCause::_no_gc;
2261     // Signal that we are about to start a collection
2262     gch->increment_total_full_collections();  // ... starting a collection cycle
2263     _collection_count_start = gch->total_full_collections();
2264   }
2265 
2266   // Used for PrintGC
2267   size_t prev_used;
2268   if (PrintGC && Verbose) {
2269     prev_used = _cmsGen->used(); // XXXPERM
2270   }
2271 
2272   // The change of the collection state is normally done at this level;
2273   // the exceptions are phases that are executed while the world is
2274   // stopped.  For those phases the change of state is done while the
2275   // world is stopped.  For baton passing purposes this allows the
2276   // background collector to finish the phase and change state atomically.
2277   // The foreground collector cannot wait on a phase that is done
2278   // while the world is stopped because the foreground collector already
2279   // has the world stopped and would deadlock.
2280   while (_collectorState != Idling) {
2281     if (TraceCMSState) {
2282       gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " in CMS state %d",
2283         Thread::current(), _collectorState);
2284     }
2285     // The foreground collector
2286     //   holds the Heap_lock throughout its collection.
2287     //   holds the CMS token (but not the lock)
2288     //     except while it is waiting for the background collector to yield.
2289     //
2290     // The foreground collector should be blocked (not for long)
2291     //   if the background collector is about to start a phase
2292     //   executed with world stopped.  If the background
2293     //   collector has already started such a phase, the
2294     //   foreground collector is blocked waiting for the
2295     //   Heap_lock.  The stop-world phases (InitialMarking and FinalMarking)
2296     //   are executed in the VM thread.
2297     //
2298     // The locking order is
2299     //   PendingListLock (PLL)  -- if applicable (FinalMarking)
2300     //   Heap_lock  (both this & PLL locked in VM_CMS_Operation::prologue())
2301     //   CMS token  (claimed in
2302     //                stop_world_and_do() -->
2303     //                  safepoint_synchronize() -->
2304     //                    CMSThread::synchronize())
2305 
2306     {
2307       // Check if the FG collector wants us to yield.
2308       CMSTokenSync x(true); // is cms thread
2309       if (waitForForegroundGC()) {
2310         // We yielded to a foreground GC, nothing more to be
2311         // done this round.
2312         assert(_foregroundGCShouldWait == false, "We set it to false in "
2313                "waitForForegroundGC()");
2314         if (TraceCMSState) {
2315           gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
2316             " exiting collection CMS state %d",
2317             Thread::current(), _collectorState);
2318         }
2319         return;
2320       } else {
2321         // The background collector can run but check to see if the
2322         // foreground collector has done a collection while the
2323         // background collector was waiting to get the CGC_lock
2324         // above.  If yes, break so that _foregroundGCShouldWait
2325         // is cleared before returning.
2326         if (_collectorState == Idling) {
2327           break;
2328         }
2329       }
2330     }
2331 
2332     assert(_foregroundGCShouldWait, "Foreground collector, if active, "
2333       "should be waiting");
2334 
2335     switch (_collectorState) {
2336       case InitialMarking:
2337         {
2338           ReleaseForegroundGC x(this);
2339           stats().record_cms_begin();
2340           VM_CMS_Initial_Mark initial_mark_op(this);
2341           VMThread::execute(&initial_mark_op);
2342         }
2343         // The collector state may be any legal state at this point
2344         // since the background collector may have yielded to the
2345         // foreground collector.
2346         break;
2347       case Marking:
2348         // initial marking in checkpointRootsInitialWork has been completed
2349         if (markFromRoots(true)) { // we were successful
2350           assert(_collectorState == Precleaning, "Collector state should "
2351             "have changed");
2352         } else {
2353           assert(_foregroundGCIsActive, "Internal state inconsistency");
2354         }
2355         break;
2356       case Precleaning:
2357         if (UseAdaptiveSizePolicy) {
2358           size_policy()->concurrent_precleaning_begin();
2359         }
2360         // marking from roots in markFromRoots has been completed
2361         preclean();
2362         if (UseAdaptiveSizePolicy) {
2363           size_policy()->concurrent_precleaning_end();
2364         }
2365         assert(_collectorState == AbortablePreclean ||
2366                _collectorState == FinalMarking,
2367                "Collector state should have changed");
2368         break;
2369       case AbortablePreclean:
2370         if (UseAdaptiveSizePolicy) {
2371           size_policy()->concurrent_phases_resume();
2372         }
2373         abortable_preclean();
2374         if (UseAdaptiveSizePolicy) {
2375           size_policy()->concurrent_precleaning_end();
2376         }
2377         assert(_collectorState == FinalMarking, "Collector state should "
2378           "have changed");
2379         break;
2380       case FinalMarking:
2381         {
2382           ReleaseForegroundGC x(this);
2383 
2384           VM_CMS_Final_Remark final_remark_op(this);
2385           VMThread::execute(&final_remark_op);
2386         }
2387         assert(_foregroundGCShouldWait, "block post-condition");
2388         break;
2389       case Sweeping:
2390         if (UseAdaptiveSizePolicy) {
2391           size_policy()->concurrent_sweeping_begin();
2392         }
2393         // final marking in checkpointRootsFinal has been completed
2394         sweep(true);
2395         assert(_collectorState == Resizing, "Collector state change "
2396           "to Resizing must be done under the free_list_lock");
2397         _full_gcs_since_conc_gc = 0;
2398 
2399         // Stop the timers for adaptive size policy for the concurrent phases
2400         if (UseAdaptiveSizePolicy) {
2401           size_policy()->concurrent_sweeping_end();
2402           size_policy()->concurrent_phases_end(gch->gc_cause(),
2403                                              gch->prev_gen(_cmsGen)->capacity(),
2404                                              _cmsGen->free());
2405         }
2406 
2407       case Resizing: {
2408         // Sweeping has been completed...
2409         // At this point the background collection has completed.
2410         // Don't move the call to compute_new_size() down
2411         // into code that might be executed if the background
2412         // collection was preempted.
2413         {
2414           ReleaseForegroundGC x(this);   // unblock FG collection
2415           MutexLockerEx       y(Heap_lock, Mutex::_no_safepoint_check_flag);
2416           CMSTokenSync        z(true);   // not strictly needed.
2417           if (_collectorState == Resizing) {
2418             compute_new_size();
2419             save_heap_summary();
2420             _collectorState = Resetting;
2421           } else {
2422             assert(_collectorState == Idling, "The state should only change"
2423                    " because the foreground collector has finished the collection");
2424           }
2425         }
2426         break;
2427       }
2428       case Resetting:
2429         // CMS heap resizing has been completed
2430         reset(true);
2431         assert(_collectorState == Idling, "Collector state should "
2432           "have changed");
2433 
2434         MetaspaceGC::set_should_concurrent_collect(false);
2435 
2436         stats().record_cms_end();
2437         // Don't move the concurrent_phases_end() and compute_new_size()
2438         // calls to here because a preempted background collection
2439         // has its state set to "Resetting".
2440         break;
2441       case Idling:
2442       default:
2443         ShouldNotReachHere();
2444         break;
2445     }
2446     if (TraceCMSState) {
2447       gclog_or_tty->print_cr("  Thread " INTPTR_FORMAT " done - next CMS state %d",
2448         Thread::current(), _collectorState);
2449     }
2450     assert(_foregroundGCShouldWait, "block post-condition");
2451   }
2452 
2453   // Should this be in gc_epilogue?
2454   collector_policy()->counters()->update_counters();
2455 
2456   {
2457     // Clear _foregroundGCShouldWait and, in the event that the
2458     // foreground collector is waiting, notify it, before
2459     // returning.
2460     MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2461     _foregroundGCShouldWait = false;
2462     if (_foregroundGCIsActive) {
2463       CGC_lock->notify();
2464     }
2465     assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
2466            "Possible deadlock");
2467   }
2468   if (TraceCMSState) {
2469     gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
2470       " exiting collection CMS state %d",
2471       Thread::current(), _collectorState);
2472   }
2473   if (PrintGC && Verbose) {
2474     _cmsGen->print_heap_change(prev_used);
2475   }
2476 }
2477 
2478 void CMSCollector::register_foreground_gc_start(GCCause::Cause cause) {
2479   if (!_cms_start_registered) {
2480     register_gc_start(cause);
2481   }
2482 }
2483 
2484 void CMSCollector::register_gc_start(GCCause::Cause cause) {
2485   _cms_start_registered = true;
2486   _gc_timer_cm->register_gc_start(os::elapsed_counter());
2487   _gc_tracer_cm->report_gc_start(cause, _gc_timer_cm->gc_start());
2488 }
2489 
2490 void CMSCollector::register_gc_end() {
2491   if (_cms_start_registered) {
2492     report_heap_summary(GCWhen::AfterGC);
2493 
2494     _gc_timer_cm->register_gc_end(os::elapsed_counter());
2495     _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
2496     _cms_start_registered = false;
2497   }
2498 }
2499 
2500 void CMSCollector::save_heap_summary() {
2501   GenCollectedHeap* gch = GenCollectedHeap::heap();
2502   _last_heap_summary = gch->create_heap_summary();
2503   _last_metaspace_summary = gch->create_metaspace_summary();
2504 }
2505 
2506 void CMSCollector::report_heap_summary(GCWhen::Type when) {
2507   _gc_tracer_cm->report_gc_heap_summary(when, _last_heap_summary, _last_metaspace_summary);
2508 }
2509 
2510 void CMSCollector::collect_in_foreground(bool clear_all_soft_refs, GCCause::Cause cause) {
2511   assert(_foregroundGCIsActive && !_foregroundGCShouldWait,
2512          "Foreground collector should be waiting, not executing");
2513   assert(Thread::current()->is_VM_thread(), "A foreground collection"
2514     " may only be done by the VM Thread with the world stopped");
2515   assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
2516          "VM thread should have CMS token");
2517 
2518   NOT_PRODUCT(GCTraceTime t("CMS:MS (foreground) ", PrintGCDetails && Verbose,
2519     true, NULL);)
2520   if (UseAdaptiveSizePolicy) {
2521     size_policy()->ms_collection_begin();
2522   }
2523   COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact);
2524 
2525   HandleMark hm;  // Discard invalid handles created during verification
2526 
2527   if (VerifyBeforeGC &&
2528       GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2529     Universe::verify();
2530   }
2531 
2532   // Snapshot the soft reference policy to be used in this collection cycle.
2533   ref_processor()->setup_policy(clear_all_soft_refs);
2534 
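       // Drive the collection state machine to completion. We may be starting
       // from any non-idle state, possibly one left partway through by a
       // preempted background cycle; the loop below runs the remaining phases
       // synchronously until the collector is Idling again, eliding the
       // concurrent preclean phases.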
2535   bool init_mark_was_synchronous = false; // until proven otherwise
2536   while (_collectorState != Idling) {
2537     if (TraceCMSState) {
2538       gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " in CMS state %d",
2539         Thread::current(), _collectorState);
2540     }
2541     switch (_collectorState) {
2542       case InitialMarking:
2543         register_foreground_gc_start(cause);
2544         init_mark_was_synchronous = true;  // fact to be exploited in re-mark
2545         checkpointRootsInitial(false);
2546         assert(_collectorState == Marking, "Collector state should have changed"
2547           " within checkpointRootsInitial()");
2548         break;
2549       case Marking:
2550         // initial marking in checkpointRootsInitialWork has been completed
2551         if (VerifyDuringGC &&
2552             GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2553           Universe::verify("Verify before initial mark: ");
2554         }
2555         {
2556           bool res = markFromRoots(false);
2557           assert(res && _collectorState == FinalMarking, "Collector state should "
2558             "have changed");
2559           break;
2560         }
2561       case FinalMarking:
2562         if (VerifyDuringGC &&
2563             GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2564           Universe::verify("Verify before re-mark: ");
2565         }
2566         checkpointRootsFinal(false, clear_all_soft_refs,
2567                              init_mark_was_synchronous);
2568         assert(_collectorState == Sweeping, "Collector state should "
2569           "have changed within checkpointRootsFinal()");
2570         break;
2571       case Sweeping:
2572         // final marking in checkpointRootsFinal has been completed
2573         if (VerifyDuringGC &&
2574             GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2575           Universe::verify("Verify before sweep: ");
2576         }
2577         sweep(false);
2578         assert(_collectorState == Resizing, "Incorrect state");
2579         break;
2580       case Resizing: {
2581         // Sweeping has been completed; the actual resize in this case
2582         // is done separately; nothing to be done in this state.
2583         _collectorState = Resetting;
2584         break;
2585       }
2586       case Resetting:
2587         // The heap has been resized.
2588         if (VerifyDuringGC &&
2589             GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2590           Universe::verify("Verify before reset: ");
2591         }
2592         save_heap_summary();
2593         reset(false);
2594         assert(_collectorState == Idling, "Collector state should "
2595           "have changed");
2596         break;
2597       case Precleaning:
2598       case AbortablePreclean:
2599         // Elide the preclean phase
2600         _collectorState = FinalMarking;
2601         break;
2602       default:
2603         ShouldNotReachHere();
2604     }
2605     if (TraceCMSState) {
2606       gclog_or_tty->print_cr("  Thread " INTPTR_FORMAT " done - next CMS state %d",
2607         Thread::current(), _collectorState);
2608     }
2609   }
2610 
2611   if (UseAdaptiveSizePolicy) {
2612     GenCollectedHeap* gch = GenCollectedHeap::heap();
2613     size_policy()->ms_collection_end(gch->gc_cause());
2614   }
2615 
2616   if (VerifyAfterGC &&
2617       GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2618     Universe::verify();
2619   }
2620   if (TraceCMSState) {
2621     gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
2622       " exiting collection CMS state %d",
2623       Thread::current(), _collectorState);
2624   }
2625 }
2626 
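     // Called with the CMS token held by the background (CMS) thread. Marks the
     // foreground collector as one that should wait; if a foreground collection
     // is already active, yields to it instead: the CMS token flags are flipped
     // to "wants token", a possibly blocked foreground thread is notified, and
     // we block until _foregroundGCIsActive is cleared before taking the token
     // back. Returns true iff we yielded.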
2627 bool CMSCollector::waitForForegroundGC() {
2628   bool res = false;
2629   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
2630          "CMS thread should have CMS token");
2631   // Block the foreground collector until the
2632   // background collector decides whether to
2633   // yield.
2634   MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2635   _foregroundGCShouldWait = true;
2636   if (_foregroundGCIsActive) {
2637     // The background collector yields to the
2638     // foreground collector and returns a value
2639     // indicating that it has yielded.  The foreground
2640     // collector can proceed.
2641     res = true;
2642     _foregroundGCShouldWait = false;
2643     ConcurrentMarkSweepThread::clear_CMS_flag(
2644       ConcurrentMarkSweepThread::CMS_cms_has_token);
2645     ConcurrentMarkSweepThread::set_CMS_flag(
2646       ConcurrentMarkSweepThread::CMS_cms_wants_token);
2647     // Get a possibly blocked foreground thread going
2648     CGC_lock->notify();
2649     if (TraceCMSState) {
2650       gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT " waiting at CMS state %d",
2651         Thread::current(), _collectorState);
2652     }
2653     while (_foregroundGCIsActive) {
2654       CGC_lock->wait(Mutex::_no_safepoint_check_flag);
2655     }
2656     ConcurrentMarkSweepThread::set_CMS_flag(
2657       ConcurrentMarkSweepThread::CMS_cms_has_token);
2658     ConcurrentMarkSweepThread::clear_CMS_flag(
2659       ConcurrentMarkSweepThread::CMS_cms_wants_token);
2660   }
2661   if (TraceCMSState) {
2662     gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT " continuing at CMS state %d",
2663       Thread::current(), _collectorState);
2664   }
2665   return res;
2666 }
2667 
2668 // Because of the need to lock the free lists and other structures in
2669 // the collector, common to all the generations that the collector is
2670 // collecting, we need the gc_prologues of individual CMS generations
2671 // to delegate to their collector. It may have been simpler had the
2672 // current infrastructure allowed one to call a prologue on a
2673 // collector. In the absence of that we have the generation's
2674 // prologue delegate to the collector, which delegates back
2675 // some "local" work to a worker method in the individual generations
2676 // that it's responsible for collecting, while itself doing any
2677 // work common to all generations it's responsible for. A similar
2678 // comment applies to the gc_epilogues.
2679 // The role of the variable _between_prologue_and_epilogue is to
2680 // enforce the invocation protocol.
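     // The resulting call chain for a prologue is roughly:
     //   GenCollectedHeap -> ConcurrentMarkSweepGeneration::gc_prologue()
     //     -> CMSCollector::gc_prologue()       (claims the shared locks once)
     //       -> ConcurrentMarkSweepGeneration::gc_prologue_work()  (local work)
     // and symmetrically for the epilogue.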
2681 void CMSCollector::gc_prologue(bool full) {
2682   // Call gc_prologue_work() for the CMSGen
2683   // we are responsible for.
2684 
2685   // The following locking discipline assumes that we are only called
2686   // when the world is stopped.
2687   assert(SafepointSynchronize::is_at_safepoint(), "world is stopped assumption");
2688 
2689   // The CMSCollector prologue must call the gc_prologues for the
2690   // "generations" that it's responsible
2691   // for.
2692 
2693   assert(   Thread::current()->is_VM_thread()
2694          || (   CMSScavengeBeforeRemark
2695              && Thread::current()->is_ConcurrentGC_thread()),
2696          "Incorrect thread type for prologue execution");
2697 
2698   if (_between_prologue_and_epilogue) {
2699     // We have already been invoked; this is a gc_prologue delegation
2700     // from yet another CMS generation that we are responsible for, just
2701     // ignore it since all relevant work has already been done.
2702     return;
2703   }
2704 
2705   // set a bit saying prologue has been called; cleared in epilogue
2706   _between_prologue_and_epilogue = true;
2707   // Claim locks for common data structures, then call gc_prologue_work()
2708   // for each CMSGen.
2709 
2710   getFreelistLocks();   // gets free list locks on constituent spaces
2711   bitMapLock()->lock_without_safepoint_check();
2712 
2713   // Should call gc_prologue_work() for all cms gens we are responsible for
2714   bool duringMarking =    _collectorState >= Marking
2715                          && _collectorState < Sweeping;
2716 
2717   // The young collections clear the modified oops state, which tells if
2718   // there are any modified oops in the class. The remark phase also needs
2719   // that information. Tell the young collection to save the union of all
2720   // modified klasses.
2721   if (duringMarking) {
2722     _ct->klass_rem_set()->set_accumulate_modified_oops(true);
2723   }
2724 
2725   bool registerClosure = duringMarking;
2726 
2727   ModUnionClosure* muc = CollectedHeap::use_parallel_gc_threads() ?
2728                                                &_modUnionClosurePar
2729                                                : &_modUnionClosure;
2730   _cmsGen->gc_prologue_work(full, registerClosure, muc);
2731 
2732   if (!full) {
2733     stats().record_gc0_begin();
2734   }
2735 }
2736 
2737 void ConcurrentMarkSweepGeneration::gc_prologue(bool full) {
2738 
2739   _capacity_at_prologue = capacity();
2740   _used_at_prologue = used();
2741 
2742   // Delegate to CMScollector which knows how to coordinate between
2743   // this and any other CMS generations that it is responsible for
2744   // collecting.
2745   collector()->gc_prologue(full);
2746 }
2747 
2748 // This is a "private" interface for use by this generation's CMSCollector.
2749 // Not to be called directly by any other entity (for instance,
2750 // GenCollectedHeap, which calls the "public" gc_prologue method above).
2751 void ConcurrentMarkSweepGeneration::gc_prologue_work(bool full,
2752   bool registerClosure, ModUnionClosure* modUnionClosure) {
2753   assert(!incremental_collection_failed(), "Shouldn't be set yet");
2754   assert(cmsSpace()->preconsumptionDirtyCardClosure() == NULL,
2755     "Should be NULL");
2756   if (registerClosure) {
2757     cmsSpace()->setPreconsumptionDirtyCardClosure(modUnionClosure);
2758   }
2759   cmsSpace()->gc_prologue();
2760   // Clear stat counters
2761   NOT_PRODUCT(
2762     assert(_numObjectsPromoted == 0, "check");
2763     assert(_numWordsPromoted   == 0, "check");
2764     if (Verbose && PrintGC) {
2765       gclog_or_tty->print("Allocated "SIZE_FORMAT" objects, "
2766                           SIZE_FORMAT" bytes concurrently",
2767       _numObjectsAllocated, _numWordsAllocated*sizeof(HeapWord));
2768     }
2769     _numObjectsAllocated = 0;
2770     _numWordsAllocated   = 0;
2771   )
2772 }
2773 
2774 void CMSCollector::gc_epilogue(bool full) {
2775   // The following locking discipline assumes that we are only called
2776   // when the world is stopped.
2777   assert(SafepointSynchronize::is_at_safepoint(),
2778          "world is stopped assumption");
2779 
2780   // Currently the CMS epilogue (see CompactibleFreeListSpace) merely checks
2781   // if linear allocation blocks need to be appropriately marked to allow
2782   // the blocks to be parsable. We also check here whether we need to nudge the
2783   // CMS collector thread to start a new cycle (if it's not already active).
2784   assert(   Thread::current()->is_VM_thread()
2785          || (   CMSScavengeBeforeRemark
2786              && Thread::current()->is_ConcurrentGC_thread()),
2787          "Incorrect thread type for epilogue execution");
2788 
2789   if (!_between_prologue_and_epilogue) {
2790     // We have already been invoked; this is a gc_epilogue delegation
2791     // from yet another CMS generation that we are responsible for, just
2792     // ignore it since all relevant work has already been done.
2793     return;
2794   }
2795   assert(haveFreelistLocks(), "must have freelist locks");
2796   assert_lock_strong(bitMapLock());
2797 
2798   _ct->klass_rem_set()->set_accumulate_modified_oops(false);
2799 
2800   _cmsGen->gc_epilogue_work(full);
2801 
2802   if (_collectorState == AbortablePreclean || _collectorState == Precleaning) {
2803     // in case sampling was not already enabled, enable it
2804     _start_sampling = true;
2805   }
2806   // reset _eden_chunk_array so sampling starts afresh
2807   _eden_chunk_index = 0;
2808 
2809   size_t cms_used   = _cmsGen->cmsSpace()->used();
2810 
2811   // update performance counters - this uses a special version of
2812   // update_counters() that allows the utilization to be passed as a
2813   // parameter, avoiding multiple calls to used().
2814   //
2815   _cmsGen->update_counters(cms_used);
2816 
2817   if (CMSIncrementalMode) {
2818     icms_update_allocation_limits();
2819   }
2820 
2821   bitMapLock()->unlock();
2822   releaseFreelistLocks();
2823 
2824   if (!CleanChunkPoolAsync) {
2825     Chunk::clean_chunk_pool();
2826   }
2827 
2828   set_did_compact(false);
2829   _between_prologue_and_epilogue = false;  // ready for next cycle
2830 }
2831 
2832 void ConcurrentMarkSweepGeneration::gc_epilogue(bool full) {
2833   collector()->gc_epilogue(full);
2834 
2835   // Also reset promotion tracking in par gc thread states.
2836   if (CollectedHeap::use_parallel_gc_threads()) {
2837     for (uint i = 0; i < ParallelGCThreads; i++) {
2838       _par_gc_thread_states[i]->promo.stopTrackingPromotions(i);
2839     }
2840   }
2841 }
2842 
2843 void ConcurrentMarkSweepGeneration::gc_epilogue_work(bool full) {
2844   assert(!incremental_collection_failed(), "Should have been cleared");
2845   cmsSpace()->setPreconsumptionDirtyCardClosure(NULL);
2846   cmsSpace()->gc_epilogue();
2847   // Print stat counters
2848   NOT_PRODUCT(
2849     assert(_numObjectsAllocated == 0, "check");
2850     assert(_numWordsAllocated == 0, "check");
2851     if (Verbose && PrintGC) {
2852       gclog_or_tty->print("Promoted "SIZE_FORMAT" objects, "
2853                           SIZE_FORMAT" bytes",
2854                  _numObjectsPromoted, _numWordsPromoted*sizeof(HeapWord));
2855     }
2856     _numObjectsPromoted = 0;
2857     _numWordsPromoted   = 0;
2858   )
2859 
2860   if (PrintGC && Verbose) {
2861     // The call down the chain in contiguous_available() needs the freelistLock,
2862     // so print this out before releasing the freelistLock.
2863     gclog_or_tty->print(" Contiguous available "SIZE_FORMAT" bytes ",
2864                         contiguous_available());
2865   }
2866 }
2867 
2868 #ifndef PRODUCT
2869 bool CMSCollector::have_cms_token() {
2870   Thread* thr = Thread::current();
2871   if (thr->is_VM_thread()) {
2872     return ConcurrentMarkSweepThread::vm_thread_has_cms_token();
2873   } else if (thr->is_ConcurrentGC_thread()) {
2874     return ConcurrentMarkSweepThread::cms_thread_has_cms_token();
2875   } else if (thr->is_GC_task_thread()) {
2876     return ConcurrentMarkSweepThread::vm_thread_has_cms_token() &&
2877            ParGCRareEvent_lock->owned_by_self();
2878   }
2879   return false;
2880 }
2881 #endif
2882 
2883 // Check reachability of the given heap address in CMS generation,
2884 // treating all other generations as roots.
2885 bool CMSCollector::is_cms_reachable(HeapWord* addr) {
2886   // We could "guarantee" below, rather than assert, but I'll
2887   // leave these as "asserts" so that an adventurous debugger
2888   // could try this in the product build provided some subset of
2889   // the conditions were met, provided they were interested in the
2890   // results and knew that the computation below wouldn't interfere
2891   // with other concurrent computations mutating the structures
2892   // being read or written.
2893   assert(SafepointSynchronize::is_at_safepoint(),
2894          "Else mutations in object graph will make answer suspect");
2895   assert(have_cms_token(), "Should hold cms token");
2896   assert(haveFreelistLocks(), "must hold free list locks");
2897   assert_lock_strong(bitMapLock());
2898 
2899   // Clear the marking bit map array before starting, but, just
2900   // for kicks, first report if the given address is already marked
2901   gclog_or_tty->print_cr("Start: Address 0x%x is%s marked", addr,
2902                 _markBitMap.isMarked(addr) ? "" : " not");
2903 
2904   if (verify_after_remark()) {
2905     MutexLockerEx x(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
2906     bool result = verification_mark_bm()->isMarked(addr);
2907     gclog_or_tty->print_cr("TransitiveMark: Address 0x%x %s marked", addr,
2908                            result ? "IS" : "is NOT");
2909     return result;
2910   } else {
2911     gclog_or_tty->print_cr("Could not compute result");
2912     return false;
2913   }
2914 }
2915 
2916 
2917 void
2918 CMSCollector::print_on_error(outputStream* st) {
2919   CMSCollector* collector = ConcurrentMarkSweepGeneration::_collector;
2920   if (collector != NULL) {
2921     CMSBitMap* bitmap = &collector->_markBitMap;
2922     st->print_cr("Marking Bits: (CMSBitMap*) " PTR_FORMAT, bitmap);
2923     bitmap->print_on_error(st, " Bits: ");
2924 
2925     st->cr();
2926 
2927     CMSBitMap* mut_bitmap = &collector->_modUnionTable;
2928     st->print_cr("Mod Union Table: (CMSBitMap*) " PTR_FORMAT, mut_bitmap);
2929     mut_bitmap->print_on_error(st, " Bits: ");
2930   }
2931 }
2932 
2933 ////////////////////////////////////////////////////////
2934 // CMS Verification Support
2935 ////////////////////////////////////////////////////////
2936 // Following the remark phase, the following invariant
2937 // should hold -- each object in the CMS heap which is
2938 // marked in markBitMap() should be marked in the verification_mark_bm().
2939 
2940 class VerifyMarkedClosure: public BitMapClosure {
2941   CMSBitMap* _marks;
2942   bool       _failed;
2943 
2944  public:
2945   VerifyMarkedClosure(CMSBitMap* bm): _marks(bm), _failed(false) {}
2946 
2947   bool do_bit(size_t offset) {
2948     HeapWord* addr = _marks->offsetToHeapWord(offset);
2949     if (!_marks->isMarked(addr)) {
2950       oop(addr)->print_on(gclog_or_tty);
2951       gclog_or_tty->print_cr(" ("INTPTR_FORMAT" should have been marked)", addr);
2952       _failed = true;
2953     }
2954     return true;
2955   }
2956 
2957   bool failed() { return _failed; }
2958 };
2959 
2960 bool CMSCollector::verify_after_remark(bool silent) {
2961   if (!silent) gclog_or_tty->print(" [Verifying CMS Marking... ");
2962   MutexLockerEx ml(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
2963   static bool init = false;
2964 
2965   assert(SafepointSynchronize::is_at_safepoint(),
2966          "Else mutations in object graph will make answer suspect");
2967   assert(have_cms_token(),
2968          "Else there may be mutual interference in use of "
2969          " verification data structures");
2970   assert(_collectorState > Marking && _collectorState <= Sweeping,
2971          "Else marking info checked here may be obsolete");
2972   assert(haveFreelistLocks(), "must hold free list locks");
2973   assert_lock_strong(bitMapLock());
2974 
2975 
2976   // Allocate marking bit map if not already allocated
2977   if (!init) { // first time
2978     if (!verification_mark_bm()->allocate(_span)) {
2979       return false;
2980     }
2981     init = true;
2982   }
2983 
2984   assert(verification_mark_stack()->isEmpty(), "Should be empty");
2985 
2986   // Turn off refs discovery -- so we will be tracing through refs.
2987   // This is as intended, because by this time
2988   // GC must already have cleared any refs that need to be cleared,
2989   // and traced those that need to be marked; moreover,
2990   // the marking done here is not going to interfere in any
2991   // way with the marking information used by GC.
2992   NoRefDiscovery no_discovery(ref_processor());
2993 
2994   COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
2995 
2996   // Clear any marks from a previous round
2997   verification_mark_bm()->clear_all();
2998   assert(verification_mark_stack()->isEmpty(), "markStack should be empty");
2999   verify_work_stacks_empty();
3000 
3001   GenCollectedHeap* gch = GenCollectedHeap::heap();
3002   gch->ensure_parsability(false);  // fill TLABs, but no need to retire them
3003   // Update the saved marks which may affect the root scans.
3004   gch->save_marks();
3005 
3006   if (CMSRemarkVerifyVariant == 1) {
3007     // In this first variant of verification, we complete
3008     // all marking, then check if the new marks-vector is
3009     // a subset of the CMS marks-vector.
3010     verify_after_remark_work_1();
3011   } else if (CMSRemarkVerifyVariant == 2) {
3012     // In this second variant of verification, we flag an error
3013     // (i.e. an object reachable in the new marks-vector not reachable
3014     // in the CMS marks-vector) immediately, also indicating the
3015     // identity of an object (A) that references the unmarked object (B) --
3016     // presumably, a mutation to A failed to be picked up by preclean/remark?
3017     verify_after_remark_work_2();
3018   } else {
3019     warning("Unrecognized value %d for CMSRemarkVerifyVariant",
3020             CMSRemarkVerifyVariant);
3021   }
3022   if (!silent) gclog_or_tty->print(" done] ");
3023   return true;
3024 }
3025 
3026 void CMSCollector::verify_after_remark_work_1() {
3027   ResourceMark rm;
3028   HandleMark  hm;
3029   GenCollectedHeap* gch = GenCollectedHeap::heap();
3030 
3031   // Get a clear set of claim bits for the strong roots processing to work with.
3032   ClassLoaderDataGraph::clear_claimed_marks();
3033 
3034   // Mark from roots one level into CMS
3035   MarkRefsIntoClosure notOlder(_span, verification_mark_bm());
3036   gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
3037 
3038   gch->gen_process_strong_roots(_cmsGen->level(),
3039                                 true,   // younger gens are roots
3040                                 true,   // activate StrongRootsScope
3041                                 false,  // not scavenging
3042                                 SharedHeap::ScanningOption(roots_scanning_options()),
3043                                 &notOlder,
3044                                 true,   // walk code active on stacks
3045                                 NULL,
3046                                 NULL); // SSS: Provide correct closure
3047 
3048   // Now mark from the roots
3049   MarkFromRootsClosure markFromRootsClosure(this, _span,
3050     verification_mark_bm(), verification_mark_stack(),
3051     false /* don't yield */, true /* verifying */);
3052   assert(_restart_addr == NULL, "Expected pre-condition");
3053   verification_mark_bm()->iterate(&markFromRootsClosure);
3054   while (_restart_addr != NULL) {
3055     // Deal with stack overflow: by restarting at the indicated
3056     // address.
3057     HeapWord* ra = _restart_addr;
3058     markFromRootsClosure.reset(ra);
3059     _restart_addr = NULL;
3060     verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
3061   }
3062   assert(verification_mark_stack()->isEmpty(), "Should have been drained");
3063   verify_work_stacks_empty();
3064 
3065   // Marking completed -- now verify that each bit marked in
3066   // verification_mark_bm() is also marked in markBitMap(); flag all
3067   // errors by printing corresponding objects.
3068   VerifyMarkedClosure vcl(markBitMap());
3069   verification_mark_bm()->iterate(&vcl);
3070   if (vcl.failed()) {
3071     gclog_or_tty->print("Verification failed");
3072     Universe::heap()->print_on(gclog_or_tty);
3073     fatal("CMS: failed marking verification after remark");
3074   }
3075 }
3076 
3077 class VerifyKlassOopsKlassClosure : public KlassClosure {
3078   class VerifyKlassOopsClosure : public OopClosure {
3079     CMSBitMap* _bitmap;
3080    public:
3081     VerifyKlassOopsClosure(CMSBitMap* bitmap) : _bitmap(bitmap) { }
3082     void do_oop(oop* p)       { guarantee(*p == NULL || _bitmap->isMarked((HeapWord*) *p), "Should be marked"); }
3083     void do_oop(narrowOop* p) { ShouldNotReachHere(); }
3084   } _oop_closure;
3085  public:
3086   VerifyKlassOopsKlassClosure(CMSBitMap* bitmap) : _oop_closure(bitmap) {}
3087   void do_klass(Klass* k) {
3088     k->oops_do(&_oop_closure);
3089   }
3090 };
3091 
3092 void CMSCollector::verify_after_remark_work_2() {
3093   ResourceMark rm;
3094   HandleMark  hm;
3095   GenCollectedHeap* gch = GenCollectedHeap::heap();
3096 
3097   // Get a clear set of claim bits for the strong roots processing to work with.
3098   ClassLoaderDataGraph::clear_claimed_marks();
3099 
3100   // Mark from roots one level into CMS
3101   MarkRefsIntoVerifyClosure notOlder(_span, verification_mark_bm(),
3102                                      markBitMap());
3103   CMKlassClosure klass_closure(&notOlder);
3104 
3105   gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
3106   gch->gen_process_strong_roots(_cmsGen->level(),
3107                                 true,   // younger gens are roots
3108                                 true,   // activate StrongRootsScope
3109                                 false,  // not scavenging
3110                                 SharedHeap::ScanningOption(roots_scanning_options()),
3111                                 &notOlder,
3112                                 true,   // walk code active on stacks
3113                                 NULL,
3114                                 &klass_closure);
3115 
3116   // Now mark from the roots
3117   MarkFromRootsVerifyClosure markFromRootsClosure(this, _span,
3118     verification_mark_bm(), markBitMap(), verification_mark_stack());
3119   assert(_restart_addr == NULL, "Expected pre-condition");
3120   verification_mark_bm()->iterate(&markFromRootsClosure);
3121   while (_restart_addr != NULL) {
3122     // Deal with stack overflow: by restarting at the indicated
3123     // address.
3124     HeapWord* ra = _restart_addr;
3125     markFromRootsClosure.reset(ra);
3126     _restart_addr = NULL;
3127     verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
3128   }
3129   assert(verification_mark_stack()->isEmpty(), "Should have been drained");
3130   verify_work_stacks_empty();
3131 
3132   VerifyKlassOopsKlassClosure verify_klass_oops(verification_mark_bm());
3133   ClassLoaderDataGraph::classes_do(&verify_klass_oops);
3134 
3135   // Marking completed -- now verify that each bit marked in
3136   // verification_mark_bm() is also marked in markBitMap(); flag all
3137   // errors by printing corresponding objects.
3138   VerifyMarkedClosure vcl(markBitMap());
3139   verification_mark_bm()->iterate(&vcl);
3140   assert(!vcl.failed(), "Else verification above should not have succeeded");
3141 }
3142 
3143 void ConcurrentMarkSweepGeneration::save_marks() {
3144   // delegate to CMS space
3145   cmsSpace()->save_marks();
3146   for (uint i = 0; i < ParallelGCThreads; i++) {
3147     _par_gc_thread_states[i]->promo.startTrackingPromotions();
3148   }
3149 }
3150 
3151 bool ConcurrentMarkSweepGeneration::no_allocs_since_save_marks() {
3152   return cmsSpace()->no_allocs_since_save_marks();
3153 }
3154 
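     // The macro below, instantiated once per closure type via
     // ALL_SINCE_SAVE_MARKS_CLOSURES, defines the family of
     // oop_since_save_marks_iterate##nv_suffix() methods; each delegates the
     // iteration to the CMS space and then advances the save marks.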
3155 #define CMS_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)    \
3156                                                                 \
3157 void ConcurrentMarkSweepGeneration::                            \
3158 oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) {   \
3159   cl->set_generation(this);                                     \
3160   cmsSpace()->oop_since_save_marks_iterate##nv_suffix(cl);      \
3161   cl->reset_generation();                                       \
3162   save_marks();                                                 \
3163 }
3164 
3165 ALL_SINCE_SAVE_MARKS_CLOSURES(CMS_SINCE_SAVE_MARKS_DEFN)
3166 
3167 void
3168 ConcurrentMarkSweepGeneration::younger_refs_iterate(OopsInGenClosure* cl) {
3169   cl->set_generation(this);
3170   younger_refs_in_space_iterate(_cmsSpace, cl);
3171   cl->reset_generation();
3172 }
3173 
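     // The iteration entry points below all follow the same locking pattern:
     // take the freelistLock only if the caller does not already hold it, then
     // delegate to the corresponding Generation:: method.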
3174 void
3175 ConcurrentMarkSweepGeneration::oop_iterate(MemRegion mr, ExtendedOopClosure* cl) {
3176   if (freelistLock()->owned_by_self()) {
3177     Generation::oop_iterate(mr, cl);
3178   } else {
3179     MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
3180     Generation::oop_iterate(mr, cl);
3181   }
3182 }
3183 
3184 void
3185 ConcurrentMarkSweepGeneration::oop_iterate(ExtendedOopClosure* cl) {
3186   if (freelistLock()->owned_by_self()) {
3187     Generation::oop_iterate(cl);
3188   } else {
3189     MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
3190     Generation::oop_iterate(cl);
3191   }
3192 }
3193 
3194 void
3195 ConcurrentMarkSweepGeneration::object_iterate(ObjectClosure* cl) {
3196   if (freelistLock()->owned_by_self()) {
3197     Generation::object_iterate(cl);
3198   } else {
3199     MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
3200     Generation::object_iterate(cl);
3201   }
3202 }
3203 
3204 void
3205 ConcurrentMarkSweepGeneration::safe_object_iterate(ObjectClosure* cl) {
3206   if (freelistLock()->owned_by_self()) {
3207     Generation::safe_object_iterate(cl);
3208   } else {
3209     MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
3210     Generation::safe_object_iterate(cl);
3211   }
3212 }
3213 
3214 void
3215 ConcurrentMarkSweepGeneration::post_compact() {
3216 }
3217 
3218 void
3219 ConcurrentMarkSweepGeneration::prepare_for_verify() {
3220   // Fix the linear allocation blocks to look like free blocks.
3221 
3222   // Locks are normally acquired/released in gc_prologue/gc_epilogue, but those
3223   // are not called when the heap is verified during universe initialization and
3224   // at vm shutdown.
3225   if (freelistLock()->owned_by_self()) {
3226     cmsSpace()->prepare_for_verify();
3227   } else {
3228     MutexLockerEx fll(freelistLock(), Mutex::_no_safepoint_check_flag);
3229     cmsSpace()->prepare_for_verify();
3230   }
3231 }
3232 
3233 void
3234 ConcurrentMarkSweepGeneration::verify() {
3235   // Locks are normally acquired/released in gc_prologue/gc_epilogue, but those
3236   // are not called when the heap is verified during universe initialization and
3237   // at vm shutdown.
3238   if (freelistLock()->owned_by_self()) {
3239     cmsSpace()->verify();
3240   } else {
3241     MutexLockerEx fll(freelistLock(), Mutex::_no_safepoint_check_flag);
3242     cmsSpace()->verify();
3243   }
3244 }
3245 
3246 void CMSCollector::verify() {
3247   _cmsGen->verify();
3248 }
3249 
3250 #ifndef PRODUCT
3251 bool CMSCollector::overflow_list_is_empty() const {
3252   assert(_num_par_pushes >= 0, "Inconsistency");
3253   if (_overflow_list == NULL) {
3254     assert(_num_par_pushes == 0, "Inconsistency");
3255   }
3256   return _overflow_list == NULL;
3257 }
3258 
3259 // The methods verify_work_stacks_empty() and verify_overflow_empty()
3260 // merely consolidate assertion checks that appear to occur together frequently.
3261 void CMSCollector::verify_work_stacks_empty() const {
3262   assert(_markStack.isEmpty(), "Marking stack should be empty");
3263   assert(overflow_list_is_empty(), "Overflow list should be empty");
3264 }
3265 
3266 void CMSCollector::verify_overflow_empty() const {
3267   assert(overflow_list_is_empty(), "Overflow list should be empty");
3268   assert(no_preserved_marks(), "No preserved marks");
3269 }
3270 #endif // PRODUCT
3271 
3272 // Decide if we want to enable class unloading as part of the
3273 // ensuing concurrent GC cycle. We will collect and
3274 // unload classes if it's the case that:
3275 // (1) an explicit gc request has been made and the flag
3276 //     ExplicitGCInvokesConcurrentAndUnloadsClasses is set, OR
3277 // (2) (a) class unloading is enabled at the command line, and
3278 //     (b) old gen is getting really full
3279 // NOTE: Provided there is no change in the state of the heap between
3280 // calls to this method, it should have idempotent results. Moreover,
3281 // its results should be monotonically increasing (i.e. going from 0 to 1,
3282 // but not 1 to 0) between successive calls between which the heap was
3283 // not collected. For the implementation below, it must thus rely on
3284 // the property that concurrent_cycles_since_last_unload()
3285 // will not decrease unless a collection cycle happened and that
3286 // _cmsGen->is_too_full() is
3287 // itself also monotonic in that sense. See check_monotonicity()
3288 // below.
3289 void CMSCollector::update_should_unload_classes() {
3290   _should_unload_classes = false;
3291   // Condition 1 above
3292   if (_full_gc_requested && ExplicitGCInvokesConcurrentAndUnloadsClasses) {
3293     _should_unload_classes = true;
3294   } else if (CMSClassUnloadingEnabled) { // Condition 2.a above
3295     // Disjuncts 2.b.(i,ii,iii) above
3296     _should_unload_classes = (concurrent_cycles_since_last_unload() >=
3297                               CMSClassUnloadingMaxInterval)
3298                            || _cmsGen->is_too_full();
3299   }
3300 }
3301 
3302 bool ConcurrentMarkSweepGeneration::is_too_full() const {
3303   bool res = should_concurrent_collect();
3304   res = res && (occupancy() > (double)CMSIsTooFullPercentage/100.0);
3305   return res;
3306 }
3307 
3308 void CMSCollector::setup_cms_unloading_and_verification_state() {
3309   const  bool should_verify =   VerifyBeforeGC || VerifyAfterGC || VerifyDuringGC
3310                              || VerifyBeforeExit;
3311   const  int  rso           =   SharedHeap::SO_Strings | SharedHeap::SO_CodeCache;
3312 
3313   if (should_unload_classes()) {   // Should unload classes this cycle
3314     remove_root_scanning_option(rso);  // Shrink the root set appropriately
3315     set_verifying(should_verify);    // Set verification state for this cycle
3316     return;                            // Nothing else needs to be done at this time
3317   }
3318 
3319   // Not unloading classes this cycle
3320   assert(!should_unload_classes(), "Inconsistency!");
3321   if ((!verifying() || unloaded_classes_last_cycle()) && should_verify) {
3322     // Include symbols, strings and code cache elements to prevent their resurrection.
3323     add_root_scanning_option(rso);
3324     set_verifying(true);
3325   } else if (verifying() && !should_verify) {
3326     // We were verifying, but some verification flags got disabled.
3327     set_verifying(false);
3328     // Exclude symbols, strings and code cache elements from root scanning to
3329     // reduce IM and RM pauses.
3330     remove_root_scanning_option(rso);
3331   }
3332 }
3333 
3334 
3335 #ifndef PRODUCT
3336 HeapWord* CMSCollector::block_start(const void* p) const {
3337   const HeapWord* addr = (HeapWord*)p;
3338   if (_span.contains(p)) {
3339     if (_cmsGen->cmsSpace()->is_in_reserved(addr)) {
3340       return _cmsGen->cmsSpace()->block_start(p);
3341     }
3342   }
3343   return NULL;
3344 }
3345 #endif
3346 
3347 HeapWord*
3348 ConcurrentMarkSweepGeneration::expand_and_allocate(size_t word_size,
3349                                                    bool   tlab,
3350                                                    bool   parallel) {
3351   CMSSynchronousYieldRequest yr;
3352   assert(!tlab, "Can't deal with TLAB allocation");
3353   MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
3354   expand(word_size*HeapWordSize, MinHeapDeltaBytes,
3355     CMSExpansionCause::_satisfy_allocation);
3356   if (GCExpandToAllocateDelayMillis > 0) {
3357     os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
3358   }
3359   return have_lock_and_allocate(word_size, tlab);
3360 }
3361 
3362 // YSR: All of this generation expansion/shrinking stuff is an exact copy of
3363 // OneContigSpaceCardGeneration, which makes me wonder if we should move this
3364 // to CardGeneration and share it...
3365 bool ConcurrentMarkSweepGeneration::expand(size_t bytes, size_t expand_bytes) {
3366   return CardGeneration::expand(bytes, expand_bytes);
3367 }
3368 
3369 void ConcurrentMarkSweepGeneration::expand(size_t bytes, size_t expand_bytes,
3370   CMSExpansionCause::Cause cause)
3371 {
3372 
3373   bool success = expand(bytes, expand_bytes);
3374 
3375   // remember why we expanded; this information is used
3376   // by shouldConcurrentCollect() when making decisions on whether to start
3377   // a new CMS cycle.
3378   if (success) {
3379     set_expansion_cause(cause);
3380     if (PrintGCDetails && Verbose) {
3381       gclog_or_tty->print_cr("Expanded CMS gen for %s",
3382         CMSExpansionCause::to_string(cause));
3383     }
3384   }
3385 }
3386 
3387 HeapWord* ConcurrentMarkSweepGeneration::expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz) {
3388   HeapWord* res = NULL;
3389   MutexLocker x(ParGCRareEvent_lock);
3390   while (true) {
3391     // Expansion by some other thread might make alloc OK now:
3392     res = ps->lab.alloc(word_sz);
3393     if (res != NULL) return res;
3394     // If there's not enough expansion space available, give up.
3395     if (_virtual_space.uncommitted_size() < (word_sz * HeapWordSize)) {
3396       return NULL;
3397     }
3398     // Otherwise, we try expansion.
3399     expand(word_sz*HeapWordSize, MinHeapDeltaBytes,
3400       CMSExpansionCause::_allocate_par_lab);
3401     // Now go around the loop and try alloc again;
3402     // A competing par_promote might beat us to the expansion space,
3403     // so we may go around the loop again if promotion fails again.
3404     if (GCExpandToAllocateDelayMillis > 0) {
3405       os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
3406     }
3407   }
3408 }
3409 
3410 
3411 bool ConcurrentMarkSweepGeneration::expand_and_ensure_spooling_space(
3412   PromotionInfo* promo) {
3413   MutexLocker x(ParGCRareEvent_lock);
3414   size_t refill_size_bytes = promo->refillSize() * HeapWordSize;
3415   while (true) {
3416     // Expansion by some other thread might make alloc OK now:
3417     if (promo->ensure_spooling_space()) {
3418       assert(promo->has_spooling_space(),
3419              "Post-condition of successful ensure_spooling_space()");
3420       return true;
3421     }
3422     // If there's not enough expansion space available, give up.
3423     if (_virtual_space.uncommitted_size() < refill_size_bytes) {
3424       return false;
3425     }
3426     // Otherwise, we try expansion.
3427     expand(refill_size_bytes, MinHeapDeltaBytes,
3428       CMSExpansionCause::_allocate_par_spooling_space);
3429     // Now go around the loop and try alloc again;
3430     // A competing allocation might beat us to the expansion space,
3431     // so we may go around the loop again if allocation fails again.
3432     if (GCExpandToAllocateDelayMillis > 0) {
3433       os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
3434     }
3435   }
3436 }
3437 
3438 
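     // Resizing support: shrink_by()/grow_by() adjust the committed virtual
     // space and keep the space end, the shared block offset array (_bts) and
     // the card table's covered region in sync with the new committed size.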
3439 void ConcurrentMarkSweepGeneration::shrink_by(size_t bytes) {
3440   assert_locked_or_safepoint(ExpandHeap_lock);
3441   // Shrink committed space
3442   _virtual_space.shrink_by(bytes);
3443   // Shrink space; this also shrinks the space's BOT
3444   _cmsSpace->set_end((HeapWord*) _virtual_space.high());
3445   size_t new_word_size = heap_word_size(_cmsSpace->capacity());
3446   // Shrink the shared block offset array
3447   _bts->resize(new_word_size);
3448   MemRegion mr(_cmsSpace->bottom(), new_word_size);
3449   // Shrink the card table
3450   Universe::heap()->barrier_set()->resize_covered_region(mr);
3451 
3452   if (Verbose && PrintGC) {
3453     size_t new_mem_size = _virtual_space.committed_size();
3454     size_t old_mem_size = new_mem_size + bytes;
3455     gclog_or_tty->print_cr("Shrinking %s from " SIZE_FORMAT "K to " SIZE_FORMAT "K",
3456                   name(), old_mem_size/K, new_mem_size/K);
3457   }
3458 }
3459 
3460 void ConcurrentMarkSweepGeneration::shrink(size_t bytes) {
3461   assert_locked_or_safepoint(Heap_lock);
3462   size_t size = ReservedSpace::page_align_size_down(bytes);
3463   if (size > 0) {
3464     shrink_by(size);
3465   }
3466 }
3467 
3468 bool ConcurrentMarkSweepGeneration::grow_by(size_t bytes) {
3469   assert_locked_or_safepoint(Heap_lock);
3470   bool result = _virtual_space.expand_by(bytes);
3471   if (result) {
3472     size_t new_word_size =
3473       heap_word_size(_virtual_space.committed_size());
3474     MemRegion mr(_cmsSpace->bottom(), new_word_size);
3475     _bts->resize(new_word_size);  // resize the block offset shared array
3476     Universe::heap()->barrier_set()->resize_covered_region(mr);
3477     // Hmmmm... why doesn't CFLS::set_end verify locking?
3478     // This is quite ugly; FIX ME XXX
3479     _cmsSpace->assert_locked(freelistLock());
3480     _cmsSpace->set_end((HeapWord*)_virtual_space.high());
3481 
3482     // update the space and generation capacity counters
3483     if (UsePerfData) {
3484       _space_counters->update_capacity();
3485       _gen_counters->update_all();
3486     }
3487 
3488     if (Verbose && PrintGC) {
3489       size_t new_mem_size = _virtual_space.committed_size();
3490       size_t old_mem_size = new_mem_size - bytes;
3491       gclog_or_tty->print_cr("Expanding %s from " SIZE_FORMAT "K by " SIZE_FORMAT "K to " SIZE_FORMAT "K",
3492                     name(), old_mem_size/K, bytes/K, new_mem_size/K);
3493     }
3494   }
3495   return result;
3496 }
3497 
3498 bool ConcurrentMarkSweepGeneration::grow_to_reserved() {
3499   assert_locked_or_safepoint(Heap_lock);
3500   bool success = true;
3501   const size_t remaining_bytes = _virtual_space.uncommitted_size();
3502   if (remaining_bytes > 0) {
3503     success = grow_by(remaining_bytes);
3504     DEBUG_ONLY(if (!success) warning("grow to reserved failed");)
3505   }
3506   return success;
3507 }
3508 
3509 void ConcurrentMarkSweepGeneration::shrink_free_list_by(size_t bytes) {
3510   assert_locked_or_safepoint(Heap_lock);
3511   assert_lock_strong(freelistLock());
3512   if (PrintGCDetails && Verbose) {
3513     warning("Shrinking of CMS not yet implemented");
3514   }
3515   return;
3516 }
3517 
3518 
3519 // Simple ctor/dtor wrapper for accounting & timer chores around concurrent
3520 // phases.
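     // A typical use (see markFromRoots() below) is a stack-allocated instance
     // bracketing a concurrent phase, e.g.:
     //   CMSPhaseAccounting pa(this, "mark", !PrintGCDetails);
     // The constructor logs the phase start and starts the timers; the
     // destructor stops them and logs the phase duration.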
3521 class CMSPhaseAccounting: public StackObj {
3522  public:
3523   CMSPhaseAccounting(CMSCollector *collector,
3524                      const char *phase,
3525                      bool print_cr = true);
3526   ~CMSPhaseAccounting();
3527 
3528  private:
3529   CMSCollector *_collector;
3530   const char *_phase;
3531   elapsedTimer _wallclock;
3532   bool _print_cr;
3533 
3534  public:
3535   // Not MT-safe; so do not pass around these StackObj's
3536   // where they may be accessed by other threads.
3537   jlong wallclock_millis() {
3538     assert(_wallclock.is_active(), "Wall clock should not stop");
3539     _wallclock.stop();  // to record time
3540     jlong ret = _wallclock.milliseconds();
3541     _wallclock.start(); // restart
3542     return ret;
3543   }
3544 };
3545 
3546 CMSPhaseAccounting::CMSPhaseAccounting(CMSCollector *collector,
3547                                        const char *phase,
3548                                        bool print_cr) :
3549   _collector(collector), _phase(phase), _print_cr(print_cr) {
3550 
3551   if (PrintCMSStatistics != 0) {
3552     _collector->resetYields();
3553   }
3554   if (PrintGCDetails) {
3555     gclog_or_tty->date_stamp(PrintGCDateStamps);
3556     gclog_or_tty->stamp(PrintGCTimeStamps);
3557     gclog_or_tty->print_cr("[%s-concurrent-%s-start]",
3558       _collector->cmsGen()->short_name(), _phase);
3559   }
3560   _collector->resetTimer();
3561   _wallclock.start();
3562   _collector->startTimer();
3563 }
3564 
3565 CMSPhaseAccounting::~CMSPhaseAccounting() {
3566   assert(_wallclock.is_active(), "Wall clock should not have stopped");
3567   _collector->stopTimer();
3568   _wallclock.stop();
3569   if (PrintGCDetails) {
3570     gclog_or_tty->date_stamp(PrintGCDateStamps);
3571     gclog_or_tty->stamp(PrintGCTimeStamps);
3572     gclog_or_tty->print("[%s-concurrent-%s: %3.3f/%3.3f secs]",
3573                  _collector->cmsGen()->short_name(),
3574                  _phase, _collector->timerValue(), _wallclock.seconds());
3575     if (_print_cr) {
3576       gclog_or_tty->print_cr("");
3577     }
3578     if (PrintCMSStatistics != 0) {
3579       gclog_or_tty->print_cr(" (CMS-concurrent-%s yielded %d times)", _phase,
3580                     _collector->yields());
3581     }
3582   }
3583 }
3584 
3585 // CMS work
3586 
3587 // The common parts of CMSParInitialMarkTask and CMSParRemarkTask.
3588 class CMSParMarkTask : public AbstractGangTask {
3589  protected:
3590   CMSCollector*     _collector;
3591   int               _n_workers;
3592   CMSParMarkTask(const char* name, CMSCollector* collector, int n_workers) :
3593       AbstractGangTask(name),
3594       _collector(collector),
3595       _n_workers(n_workers) {}
3596   // Work method in support of parallel rescan ... of young gen spaces
3597   void do_young_space_rescan(uint worker_id, OopsInGenClosure* cl,
3598                              ContiguousSpace* space,
3599                              HeapWord** chunk_array, size_t chunk_top);
3600   void work_on_young_gen_roots(uint worker_id, OopsInGenClosure* cl);
3601 };
3602 
3603 // Parallel initial mark task
3604 class CMSParInitialMarkTask: public CMSParMarkTask {
3605  public:
3606   CMSParInitialMarkTask(CMSCollector* collector, int n_workers) :
3607       CMSParMarkTask("Scan roots and young gen for initial mark in parallel",
3608                      collector, n_workers) {}
3609   void work(uint worker_id);
3610 };
3611 
3612 // Checkpoint the roots into this generation from outside
3613 // this generation. [Note this initial checkpoint need only
3614 // be approximate -- we'll do a catch up phase subsequently.]
3615 void CMSCollector::checkpointRootsInitial(bool asynch) {
3616   assert(_collectorState == InitialMarking, "Wrong collector state");
3617   check_correct_thread_executing();
3618   TraceCMSMemoryManagerStats tms(_collectorState, GenCollectedHeap::heap()->gc_cause());
3619 
3620   save_heap_summary();
3621   report_heap_summary(GCWhen::BeforeGC);
3622 
3623   ReferenceProcessor* rp = ref_processor();
3624   SpecializationStats::clear();
3625   assert(_restart_addr == NULL, "Control point invariant");
3626   if (asynch) {
3627     // acquire locks for subsequent manipulations
3628     MutexLockerEx x(bitMapLock(),
3629                     Mutex::_no_safepoint_check_flag);
3630     checkpointRootsInitialWork(asynch);
3631     // enable ("weak") refs discovery
3632     rp->enable_discovery(true /*verify_disabled*/, true /*check_no_refs*/);
3633     _collectorState = Marking;
3634   } else {
3635     // (Weak) Refs discovery: this is controlled from genCollectedHeap::do_collection
3636     // which recognizes if we are a CMS generation, and doesn't try to turn on
3637     // discovery; verify that they aren't meddling.
3638     assert(!rp->discovery_is_atomic(),
3639            "incorrect setting of discovery predicate");
3640     assert(!rp->discovery_enabled(), "genCollectedHeap shouldn't control "
3641            "ref discovery for this generation kind");
3642     // already have locks
3643     checkpointRootsInitialWork(asynch);
3644     // now enable ("weak") refs discovery
3645     rp->enable_discovery(true /*verify_disabled*/, false /*verify_no_refs*/);
3646     _collectorState = Marking;
3647   }
3648   SpecializationStats::print();
3649 }
3650 
3651 void CMSCollector::checkpointRootsInitialWork(bool asynch) {
3652   assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped");
3653   assert(_collectorState == InitialMarking, "just checking");
3654 
3655   // If there has not been a GC[n-1] since last GC[n] cycle completed,
3656   // precede our marking with a collection of all
3657   // younger generations to keep floating garbage to a minimum.
3658   // XXX: we won't do this for now -- it's an optimization to be done later.
3659 
3660   // already have locks
3661   assert_lock_strong(bitMapLock());
3662   assert(_markBitMap.isAllClear(), "was reset at end of previous cycle");
3663 
3664   // Setup the verification and class unloading state for this
3665   // CMS collection cycle.
3666   setup_cms_unloading_and_verification_state();
3667 
3668   NOT_PRODUCT(GCTraceTime t("\ncheckpointRootsInitialWork",
3669     PrintGCDetails && Verbose, true, _gc_timer_cm);)
3670   if (UseAdaptiveSizePolicy) {
3671     size_policy()->checkpoint_roots_initial_begin();
3672   }
3673 
3674   // Reset all the PLAB chunk arrays if necessary.
3675   if (_survivor_plab_array != NULL && !CMSPLABRecordAlways) {
3676     reset_survivor_plab_arrays();
3677   }
3678 
3679   ResourceMark rm;
3680   HandleMark  hm;
3681 
3682   FalseClosure falseClosure;
3683   // In the case of a synchronous collection, we will elide the
3684   // remark step, so it's important to catch all the nmethod oops
3685   // in this step.
3686   // The final 'true' flag to gen_process_strong_roots will ensure this.
3687   // If 'async' is true, we can relax the nmethod tracing.
3688   MarkRefsIntoClosure notOlder(_span, &_markBitMap);
3689   GenCollectedHeap* gch = GenCollectedHeap::heap();
3690 
3691   verify_work_stacks_empty();
3692   verify_overflow_empty();
3693 
3694   gch->ensure_parsability(false);  // fill TLABs, but no need to retire them
3695   // Update the saved marks which may affect the root scans.
3696   gch->save_marks();
3697 
3698   // weak reference processing has not started yet.
3699   ref_processor()->set_enqueuing_is_done(false);
3700 
3701   // Need to remember all newly created CLDs,
3702   // so that we can guarantee that the remark finds them.
3703   ClassLoaderDataGraph::remember_new_clds(true);
3704 
3705   // Whenever a CLD is found, it will be claimed before proceeding to mark
3706   // the klasses. The claimed marks need to be cleared before marking starts.
3707   ClassLoaderDataGraph::clear_claimed_marks();
3708 
3709   if (CMSPrintEdenSurvivorChunks) {
3710     print_eden_and_survivor_chunk_arrays();
3711   }
3712 
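       // Do the initial root marking below, either with the parallel
       // CMSParInitialMarkTask over the work gang or serially via
       // gen_process_strong_roots, depending on CMSParallelInitialMarkEnabled
       // and the availability of parallel GC threads.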
3713   {
3714     COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
3715     if (CMSParallelInitialMarkEnabled && CollectedHeap::use_parallel_gc_threads()) {
3716       // The parallel version.
3717       FlexibleWorkGang* workers = gch->workers();
3718       assert(workers != NULL, "Need parallel worker threads.");
3719       int n_workers = workers->active_workers();
3720       CMSParInitialMarkTask tsk(this, n_workers);
3721       gch->set_par_threads(n_workers);
3722       initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
3723       if (n_workers > 1) {
3724         GenCollectedHeap::StrongRootsScope srs(gch);
3725         workers->run_task(&tsk);
3726       } else {
3727         GenCollectedHeap::StrongRootsScope srs(gch);
3728         tsk.work(0);
3729       }
3730       gch->set_par_threads(0);
3731     } else {
3732       // The serial version.
3733       CMKlassClosure klass_closure(&notOlder);
3734       gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
3735       gch->gen_process_strong_roots(_cmsGen->level(),
3736                                     true,   // younger gens are roots
3737                                     true,   // activate StrongRootsScope
3738                                     false,  // not scavenging
3739                                     SharedHeap::ScanningOption(roots_scanning_options()),
3740                                     &notOlder,
3741                                     true,   // walk all of code cache if (so & SO_CodeCache)
3742                                     NULL,
3743                                     &klass_closure);
3744     }
3745   }
3746 
3747   // Clear mod-union table; it will be dirtied in the prologue of
3748   // CMS generation per each younger generation collection.
3749 
3750   assert(_modUnionTable.isAllClear(),
3751        "Was cleared in most recent final checkpoint phase"
3752        " or no bits are set in the gc_prologue before the start of the next "
3753        "subsequent marking phase.");
3754 
3755   assert(_ct->klass_rem_set()->mod_union_is_clear(), "Must be");
3756 
3757   // Save the end of the used_region of the constituent generations
3758   // to be used to limit the extent of sweep in each generation.
3759   save_sweep_limits();
3760   if (UseAdaptiveSizePolicy) {
3761     size_policy()->checkpoint_roots_initial_end(gch->gc_cause());
3762   }
3763   verify_overflow_empty();
3764 }
3765 
3766 bool CMSCollector::markFromRoots(bool asynch) {
3767   // we might be tempted to assert that:
3768   // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
3769   //        "inconsistent argument?");
3770   // However that wouldn't be right, because it's possible that
3771   // a safepoint is indeed in progress as a younger generation
3772   // stop-the-world GC happens even as we mark in this generation.
3773   assert(_collectorState == Marking, "inconsistent state?");
3774   check_correct_thread_executing();
3775   verify_overflow_empty();
3776 
3777   bool res;
3778   if (asynch) {
3779 
3780     // Start the timers for adaptive size policy for the concurrent phases
3781     // Do it here so that the foreground MS can use the concurrent
3782     // timer since a foreground MS might have the sweep done concurrently
3783     // or STW.
3784     if (UseAdaptiveSizePolicy) {
3785       size_policy()->concurrent_marking_begin();
3786     }
3787 
3788     // Weak ref discovery note: We may be discovering weak
3789     // refs in this generation concurrent (but interleaved) with
3790     // weak ref discovery by a younger generation collector.
3791 
3792     CMSTokenSyncWithLocks ts(true, bitMapLock());
3793     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
3794     CMSPhaseAccounting pa(this, "mark", !PrintGCDetails);
3795     res = markFromRootsWork(asynch);
3796     if (res) {
3797       _collectorState = Precleaning;
3798     } else { // We failed and a foreground collection wants to take over
3799       assert(_foregroundGCIsActive, "internal state inconsistency");
3800       assert(_restart_addr == NULL,  "foreground will restart from scratch");
3801       if (PrintGCDetails) {
3802         gclog_or_tty->print_cr("bailing out to foreground collection");
3803       }
3804     }
3805     if (UseAdaptiveSizePolicy) {
3806       size_policy()->concurrent_marking_end();
3807     }
3808   } else {
3809     assert(SafepointSynchronize::is_at_safepoint(),
3810            "inconsistent with asynch == false");
3811     if (UseAdaptiveSizePolicy) {
3812       size_policy()->ms_collection_marking_begin();
3813     }
3814     // already have locks
3815     res = markFromRootsWork(asynch);
3816     _collectorState = FinalMarking;
3817     if (UseAdaptiveSizePolicy) {
3818       GenCollectedHeap* gch = GenCollectedHeap::heap();
3819       size_policy()->ms_collection_marking_end(gch->gc_cause());
3820     }
3821   }
3822   verify_overflow_empty();
3823   return res;
3824 }
3825 
3826 bool CMSCollector::markFromRootsWork(bool asynch) {
3827   // iterate over marked bits in bit map, doing a full scan and mark
3828   // from these roots using the following algorithm:
3829   // . if oop is to the right of the current scan pointer,
3830   //   mark corresponding bit (we'll process it later)
3831   // . else (oop is to left of current scan pointer)
3832   //   push oop on marking stack
3833   // . drain the marking stack
3834 
3835   // Note that when we do a marking step we need to hold the
3836   // bit map lock -- recall that direct allocation (by mutators)
3837   // and promotion (by younger generation collectors) is also
3838   // marking the bit map. [the so-called allocate live policy.]
3839   // Because the implementation of bit map marking is not
3840   // robust wrt simultaneous marking of bits in the same word,
3841   // we need to make sure that there is no such interference
3842   // between concurrent such updates.
3843 
3844   // already have locks
3845   assert_lock_strong(bitMapLock());
3846 
3847   verify_work_stacks_empty();
3848   verify_overflow_empty();
3849   bool result = false;
3850   if (CMSConcurrentMTEnabled && ConcGCThreads > 0) {
3851     result = do_marking_mt(asynch);
3852   } else {
3853     result = do_marking_st(asynch);
3854   }
3855   return result;
3856 }
3857 
3858 // Forward decl
3859 class CMSConcMarkingTask;
3860 
3861 class CMSConcMarkingTerminator: public ParallelTaskTerminator {
3862   CMSCollector*       _collector;
3863   CMSConcMarkingTask* _task;
3864  public:
3865   virtual void yield();
3866 
3867   // "n_threads" is the number of threads to be terminated.
3868   // "queue_set" is a set of work queues of other threads.
3869   // "collector" is the CMS collector associated with this task terminator.
3870   // "yield" indicates whether we need the gang as a whole to yield.
3871   CMSConcMarkingTerminator(int n_threads, TaskQueueSetSuper* queue_set, CMSCollector* collector) :
3872     ParallelTaskTerminator(n_threads, queue_set),
3873     _collector(collector) { }
3874 
3875   void set_task(CMSConcMarkingTask* task) {
3876     _task = task;
3877   }
3878 };
3879 
3880 class CMSConcMarkingTerminatorTerminator: public TerminatorTerminator {
3881   CMSConcMarkingTask* _task;
3882  public:
3883   bool should_exit_termination();
3884   void set_task(CMSConcMarkingTask* task) {
3885     _task = task;
3886   }
3887 };
3888 
3889 // MT Concurrent Marking Task
3890 class CMSConcMarkingTask: public YieldingFlexibleGangTask {
3891   CMSCollector* _collector;
3892   int           _n_workers;                  // requested/desired # workers
3893   bool          _asynch;
3894   bool          _result;
3895   CompactibleFreeListSpace*  _cms_space;
3896   char          _pad_front[64];   // padding to ...
3897   HeapWord*     _global_finger;   // ... avoid sharing cache line
3898   char          _pad_back[64];
3899   HeapWord*     _restart_addr;
3900 
3901   //  Exposed here for yielding support
3902   Mutex* const _bit_map_lock;
3903 
3904   // The per thread work queues, available here for stealing
3905   OopTaskQueueSet*  _task_queues;
3906 
3907   // Termination (and yielding) support
3908   CMSConcMarkingTerminator _term;
3909   CMSConcMarkingTerminatorTerminator _term_term;
3910 
3911  public:
3912   CMSConcMarkingTask(CMSCollector* collector,
3913                  CompactibleFreeListSpace* cms_space,
3914                  bool asynch,
3915                  YieldingFlexibleWorkGang* workers,
3916                  OopTaskQueueSet* task_queues):
3917     YieldingFlexibleGangTask("Concurrent marking done multi-threaded"),
3918     _collector(collector),
3919     _cms_space(cms_space),
3920     _asynch(asynch), _n_workers(0), _result(true),
3921     _task_queues(task_queues),
3922     _term(_n_workers, task_queues, _collector),
3923     _bit_map_lock(collector->bitMapLock())
3924   {
3925     _requested_size = _n_workers;
3926     _term.set_task(this);
3927     _term_term.set_task(this);
3928     _restart_addr = _global_finger = _cms_space->bottom();
3929   }
3930 
3931 
3932   OopTaskQueueSet* task_queues()  { return _task_queues; }
3933 
3934   OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
3935 
3936   HeapWord** global_finger_addr() { return &_global_finger; }
3937 
3938   CMSConcMarkingTerminator* terminator() { return &_term; }
3939 
3940   virtual void set_for_termination(int active_workers) {
3941     terminator()->reset_for_reuse(active_workers);
3942   }
3943 
3944   void work(uint worker_id);
3945   bool should_yield() {
3946     return    ConcurrentMarkSweepThread::should_yield()
3947            && !_collector->foregroundGCIsActive()
3948            && _asynch;
3949   }
3950 
3951   virtual void coordinator_yield();  // stuff done by coordinator
3952   bool result() { return _result; }
3953 
3954   void reset(HeapWord* ra) {
3955     assert(_global_finger >= _cms_space->end(),  "Postcondition of ::work(i)");
3956     _restart_addr = _global_finger = ra;
3957     _term.reset_for_reuse();
3958   }
3959 
3960   static bool get_work_from_overflow_stack(CMSMarkStack* ovflw_stk,
3961                                            OopTaskQueue* work_q);
3962 
3963  private:
3964   void do_scan_and_mark(int i, CompactibleFreeListSpace* sp);
3965   void do_work_steal(int i);
3966   void bump_global_finger(HeapWord* f);
3967 };
3968 
3969 bool CMSConcMarkingTerminatorTerminator::should_exit_termination() {
3970   assert(_task != NULL, "Error");
3971   return _task->yielding();
3972   // Note that we do not need the disjunct || _task->should_yield() above
3973   // because we want terminating threads to yield only if the task
3974   // is already in the midst of yielding, which happens only after at least one
3975   // thread has yielded.
3976 }
3977 
3978 void CMSConcMarkingTerminator::yield() {
3979   if (_task->should_yield()) {
3980     _task->yield();
3981   } else {
3982     ParallelTaskTerminator::yield();
3983   }
3984 }
3985 
3986 ////////////////////////////////////////////////////////////////
3987 // Concurrent Marking Algorithm Sketch
3988 ////////////////////////////////////////////////////////////////
3989 // Until all tasks exhausted (both spaces):
3990 // -- claim next available chunk
3991 // -- bump global finger via CAS
3992 // -- find first object that starts in this chunk
3993 //    and start scanning bitmap from that position
3994 // -- scan marked objects for oops
3995 // -- CAS-mark target, and if successful:
3996 //    . if target oop is above global finger (volatile read)
3997 //      nothing to do
3998 //    . if target oop is in chunk and above local finger
3999 //        then nothing to do
4000 //    . else push on work-queue
4001 // -- Deal with possible overflow issues:
4002 //    . local work-queue overflow causes stuff to be pushed on
4003 //      global (common) overflow queue
4004 //    . always first empty local work queue
4005 //    . then get a batch of oops from global work queue if any
4006 //    . then do work stealing
4007 // -- When all tasks claimed (both spaces)
4008 //    and local work queue empty,
4009 //    then in a loop do:
4010 //    . check global overflow stack; steal a batch of oops and trace
4011 //    . try to steal from other threads if the GOS is empty
4012 //    . if neither is available, offer termination
4013 // -- Terminate and return result
4014 //
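// Illustrative example (not normative): with chunk size C words, the worker
// that claims task n scans the bitmap over [bottom + n*C, bottom + (n+1)*C),
// after first CAS-ing the global finger up to the end of that chunk. When a
// scanned object yields a reference that we succeed in CAS-marking, the newly
// grey oop needs no pushing if it lies above the global finger (the chunk
// containing it will be claimed and scanned later), nor if it lies in the
// current chunk ahead of this worker's local finger (this worker's own scan
// will reach it); otherwise it is pushed on the worker's work queue.
//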
4015 void CMSConcMarkingTask::work(uint worker_id) {
4016   elapsedTimer _timer;
4017   ResourceMark rm;
4018   HandleMark hm;
4019 
4020   DEBUG_ONLY(_collector->verify_overflow_empty();)
4021 
4022   // Before we begin work, our work queue should be empty
4023   assert(work_queue(worker_id)->size() == 0, "Expected to be empty");
4024   // Scan the bitmap covering _cms_space, tracing through grey objects.
4025   _timer.start();
4026   do_scan_and_mark(worker_id, _cms_space);
4027   _timer.stop();
4028   if (PrintCMSStatistics != 0) {
4029     gclog_or_tty->print_cr("Finished cms space scanning in %dth thread: %3.3f sec",
4030       worker_id, _timer.seconds());
4031       // XXX: need xxx/xxx type of notation, two timers
4032   }
4033 
4034   // ... do work stealing
4035   _timer.reset();
4036   _timer.start();
4037   do_work_steal(worker_id);
4038   _timer.stop();
4039   if (PrintCMSStatistics != 0) {
4040     gclog_or_tty->print_cr("Finished work stealing in %dth thread: %3.3f sec",
4041       worker_id, _timer.seconds());
4042       // XXX: need xxx/xxx type of notation, two timers
4043   }
4044   assert(_collector->_markStack.isEmpty(), "Should have been emptied");
4045   assert(work_queue(worker_id)->size() == 0, "Should have been emptied");
4046   // Note that under the current task protocol, the
4047   // following assertion is true even if the spaces
4048   // expanded since the completion of the concurrent
4049   // marking. XXX This will likely change under a strict
4050   // ABORT semantics.
4051   // After perm removal the comparison was changed to
4052   // greater than or equal to from strictly greater than.
4053   // Before perm removal the highest address sweep would
4054   // have been at the end of perm gen but now is at the
4055   // end of the tenured gen.
4056   assert(_global_finger >=  _cms_space->end(),
4057          "All tasks have been completed");
4058   DEBUG_ONLY(_collector->verify_overflow_empty();)
4059 }
4060 
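// Monotonically advance the shared _global_finger to at least "f": retry the
// CAS with a freshly read value until either our update succeeds or another
// worker has already moved the finger to (or beyond) "f". The finger never
// moves backwards.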
4061 void CMSConcMarkingTask::bump_global_finger(HeapWord* f) {
4062   HeapWord* read = _global_finger;
4063   HeapWord* cur  = read;
4064   while (f > read) {
4065     cur = read;
4066     read = (HeapWord*) Atomic::cmpxchg_ptr(f, &_global_finger, cur);
4067     if (cur == read) {
4068       // our cas succeeded
4069       assert(_global_finger >= f, "protocol consistency");
4070       break;
4071     }
4072   }
4073 }
4074 
4075 // This is really inefficient, and should be redone by
4076 // using (not yet available) block-read and -write interfaces to the
4077 // stack and the work_queue. XXX FIX ME !!!
4078 bool CMSConcMarkingTask::get_work_from_overflow_stack(CMSMarkStack* ovflw_stk,
4079                                                       OopTaskQueue* work_q) {
4080   // Fast lock-free check
4081   if (ovflw_stk->length() == 0) {
4082     return false;
4083   }
4084   assert(work_q->size() == 0, "Shouldn't steal");
4085   MutexLockerEx ml(ovflw_stk->par_lock(),
4086                    Mutex::_no_safepoint_check_flag);
4087   // Grab up to 1/4 the size of the work queue
4088   size_t num = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
4089                     (size_t)ParGCDesiredObjsFromOverflowList);
4090   num = MIN2(num, ovflw_stk->length());
4091   for (int i = (int) num; i > 0; i--) {
4092     oop cur = ovflw_stk->pop();
4093     assert(cur != NULL, "Counted wrong?");
4094     work_q->push(cur);
4095   }
4096   return num > 0;
4097 }
4098 
4099 void CMSConcMarkingTask::do_scan_and_mark(int i, CompactibleFreeListSpace* sp) {
4100   SequentialSubTasksDone* pst = sp->conc_par_seq_tasks();
4101   int n_tasks = pst->n_tasks();
4102   // We allow that there may be no tasks to do here because
4103   // we are restarting after a stack overflow.
4104   assert(pst->valid() || n_tasks == 0, "Uninitialized use?");
4105   uint nth_task = 0;
4106 
4107   HeapWord* aligned_start = sp->bottom();
4108   if (sp->used_region().contains(_restart_addr)) {
4109     // Align down to a card boundary for the start of 0th task
4110     // for this space.
4111     aligned_start =
4112       (HeapWord*)align_size_down((uintptr_t)_restart_addr,
4113                                  CardTableModRefBS::card_size);
4114   }
4115 
4116   size_t chunk_size = sp->marking_task_size();
4117   while (!pst->is_task_claimed(/* reference */ nth_task)) {
4118     // Having claimed the nth task in this space,
4119     // compute the chunk that it corresponds to:
4120     MemRegion span = MemRegion(aligned_start + nth_task*chunk_size,
4121                                aligned_start + (nth_task+1)*chunk_size);
4122     // Try and bump the global finger via a CAS;
4123     // note that we need to do the global finger bump
4124     // _before_ taking the intersection below, because
4125     // the task corresponding to that region will be
4126     // deemed done even if the used_region() expands
4127     // because of allocation -- as it almost certainly will
4128     // during start-up while the threads yield in the
4129     // closure below.
4130     HeapWord* finger = span.end();
4131     bump_global_finger(finger);   // atomically
4132     // There are null tasks here corresponding to chunks
4133     // beyond the "top" address of the space.
4134     span = span.intersection(sp->used_region());
4135     if (!span.is_empty()) {  // Non-null task
4136       HeapWord* prev_obj;
4137       assert(!span.contains(_restart_addr) || nth_task == 0,
4138              "Inconsistency");
4139       if (nth_task == 0) {
4140         // For the 0th task, we'll not need to compute a block_start.
4141         if (span.contains(_restart_addr)) {
4142           // In the case of a restart because of stack overflow,
4143           // we might additionally skip a chunk prefix.
4144           prev_obj = _restart_addr;
4145         } else {
4146           prev_obj = span.start();
4147         }
4148       } else {
4149         // We want to skip the first object because
4150         // the protocol is to scan any object in its entirety
4151         // that _starts_ in this span; a fortiori, any
4152         // object starting in an earlier span is scanned
4153         // as part of an earlier claimed task.
4154         // Below we use the "careful" version of block_start
4155         // so we do not try to navigate uninitialized objects.
4156         prev_obj = sp->block_start_careful(span.start());
4157         // Below we use a variant of block_size that uses the
4158         // Printezis bits to avoid waiting for allocated
4159         // objects to become initialized/parsable.
4160         while (prev_obj < span.start()) {
4161           size_t sz = sp->block_size_no_stall(prev_obj, _collector);
4162           if (sz > 0) {
4163             prev_obj += sz;
4164           } else {
4165             // In this case we may end up doing a bit of redundant
4166             // scanning, but that appears unavoidable, short of
4167             // locking the free list locks; see bug 6324141.
4168             break;
4169           }
4170         }
4171       }
4172       if (prev_obj < span.end()) {
4173         MemRegion my_span = MemRegion(prev_obj, span.end());
4174         // Do the marking work within a non-empty span --
4175         // the last argument to the constructor indicates whether the
4176         // iteration should be incremental with periodic yields.
4177         Par_MarkFromRootsClosure cl(this, _collector, my_span,
4178                                     &_collector->_markBitMap,
4179                                     work_queue(i),
4180                                     &_collector->_markStack,
4181                                     _asynch);
4182         _collector->_markBitMap.iterate(&cl, my_span.start(), my_span.end());
4183       } // else nothing to do for this task
4184     }   // else nothing to do for this task
4185   }
4186   // We'd be tempted to assert here that since there are no
4187   // more tasks left to claim in this space, the global_finger
4188   // must exceed space->top() and a fortiori space->end(). However,
4189   // that would not quite be correct because the bumping of
4190   // global_finger occurs strictly after the claiming of a task,
4191   // so by the time we reach here the global finger may not yet
4192   // have been bumped up by the thread that claimed the last
4193   // task.
4194   pst->all_tasks_completed();
4195 }
4196 
4197 class Par_ConcMarkingClosure: public CMSOopClosure {
4198  private:
4199   CMSCollector* _collector;
4200   CMSConcMarkingTask* _task;
4201   MemRegion     _span;
4202   CMSBitMap*    _bit_map;
4203   CMSMarkStack* _overflow_stack;
4204   OopTaskQueue* _work_queue;
4205  protected:
4206   DO_OOP_WORK_DEFN
4207  public:
4208   Par_ConcMarkingClosure(CMSCollector* collector, CMSConcMarkingTask* task, OopTaskQueue* work_queue,
4209                          CMSBitMap* bit_map, CMSMarkStack* overflow_stack):
4210     CMSOopClosure(collector->ref_processor()),
4211     _collector(collector),
4212     _task(task),
4213     _span(collector->_span),
4214     _work_queue(work_queue),
4215     _bit_map(bit_map),
4216     _overflow_stack(overflow_stack)
4217   { }
4218   virtual void do_oop(oop* p);
4219   virtual void do_oop(narrowOop* p);
4220 
4221   void trim_queue(size_t max);
4222   void handle_stack_overflow(HeapWord* lost);
4223   void do_yield_check() {
4224     if (_task->should_yield()) {
4225       _task->yield();
4226     }
4227   }
4228 };
4229 
4230 // Grey object scanning during work stealing phase --
4231 // the salient assumption here is that any references
4232 // that are in these stolen objects being scanned must
4233 // already have been initialized (else they would not have
4234 // been published), so we do not need to check for
4235 // uninitialized objects before pushing here.
4236 void Par_ConcMarkingClosure::do_oop(oop obj) {
4237   assert(obj->is_oop_or_null(true), "expected an oop or NULL");
4238   HeapWord* addr = (HeapWord*)obj;
4239   // Check if oop points into the CMS generation
4240   // and is not marked
4241   if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
4242     // a white object ...
4243     // If we manage to "claim" the object, by being the
4244     // first thread to mark it, then we push it on our
4245     // marking stack
4246     if (_bit_map->par_mark(addr)) {     // ... now grey
4247       // push on work queue (grey set)
4248       bool simulate_overflow = false;
4249       NOT_PRODUCT(
4250         if (CMSMarkStackOverflowALot &&
4251             _collector->simulate_overflow()) {
4252           // simulate a stack overflow
4253           simulate_overflow = true;
4254         }
4255       )
4256       if (simulate_overflow ||
4257           !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
4258         // stack overflow
4259         if (PrintCMSStatistics != 0) {
4260           gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
4261                                  SIZE_FORMAT, _overflow_stack->capacity());
4262         }
4263         // We cannot assert that the overflow stack is full because
4264         // it may have been emptied since.
4265         assert(simulate_overflow ||
4266                _work_queue->size() == _work_queue->max_elems(),
4267               "Else push should have succeeded");
4268         handle_stack_overflow(addr);
4269       }
4270     } // Else, some other thread got there first
4271     do_yield_check();
4272   }
4273 }
4274 
4275 void Par_ConcMarkingClosure::do_oop(oop* p)       { Par_ConcMarkingClosure::do_oop_work(p); }
4276 void Par_ConcMarkingClosure::do_oop(narrowOop* p) { Par_ConcMarkingClosure::do_oop_work(p); }
4277 
4278 void Par_ConcMarkingClosure::trim_queue(size_t max) {
4279   while (_work_queue->size() > max) {
4280     oop new_oop;
4281     if (_work_queue->pop_local(new_oop)) {
4282       assert(new_oop->is_oop(), "Should be an oop");
4283       assert(_bit_map->isMarked((HeapWord*)new_oop), "Grey object");
4284       assert(_span.contains((HeapWord*)new_oop), "Not in span");
4285       new_oop->oop_iterate(this);  // do_oop() above
4286       do_yield_check();
4287     }
4288   }
4289 }
4290 
4291 // Upon stack overflow, we discard (part of) the stack,
4292 // remembering the least address amongst those discarded
4293 // in CMSCollector's _restart_addr.
4294 void Par_ConcMarkingClosure::handle_stack_overflow(HeapWord* lost) {
4295   // We need to do this under a mutex to prevent other
4296   // workers from interfering with the work done below.
4297   MutexLockerEx ml(_overflow_stack->par_lock(),
4298                    Mutex::_no_safepoint_check_flag);
4299   // Remember the least grey address discarded
4300   HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost);
4301   _collector->lower_restart_addr(ra);
4302   _overflow_stack->reset();  // discard stack contents
4303   _overflow_stack->expand(); // expand the stack if possible
4304 }
4305 
4306 
4307 void CMSConcMarkingTask::do_work_steal(int i) {
4308   OopTaskQueue* work_q = work_queue(i);
4309   oop obj_to_scan;
4310   CMSBitMap* bm = &(_collector->_markBitMap);
4311   CMSMarkStack* ovflw = &(_collector->_markStack);
4312   int* seed = _collector->hash_seed(i);
4313   Par_ConcMarkingClosure cl(_collector, this, work_q, bm, ovflw);
4314   while (true) {
4315     cl.trim_queue(0);
4316     assert(work_q->size() == 0, "Should have been emptied above");
4317     if (get_work_from_overflow_stack(ovflw, work_q)) {
4318       // Can't assert below because the work obtained from the
4319       // overflow stack may already have been stolen from us.
4320       // assert(work_q->size() > 0, "Work from overflow stack");
4321       continue;
4322     } else if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
4323       assert(obj_to_scan->is_oop(), "Should be an oop");
4324       assert(bm->isMarked((HeapWord*)obj_to_scan), "Grey object");
4325       obj_to_scan->oop_iterate(&cl);
4326     } else if (terminator()->offer_termination(&_term_term)) {
4327       assert(work_q->size() == 0, "Impossible!");
4328       break;
4329     } else if (yielding() || should_yield()) {
4330       yield();
4331     }
4332   }
4333 }
4334 
4335 // This is run by the CMS (coordinator) thread.
4336 void CMSConcMarkingTask::coordinator_yield() {
4337   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
4338          "CMS thread should hold CMS token");
4339   // First give up the locks, then yield, then re-lock
4340   // We should probably use a constructor/destructor idiom to
4341   // do this unlock/lock or modify the MutexUnlocker class to
4342   // serve our purpose. XXX
4343   assert_lock_strong(_bit_map_lock);
4344   _bit_map_lock->unlock();
4345   ConcurrentMarkSweepThread::desynchronize(true);
4346   ConcurrentMarkSweepThread::acknowledge_yield_request();
4347   _collector->stopTimer();
4348   if (PrintCMSStatistics != 0) {
4349     _collector->incrementYields();
4350   }
4351   _collector->icms_wait();
4352 
4353   // It is possible for whichever thread initiated the yield request
4354   // not to get a chance to wake up and take the bitmap lock between
4355   // this thread releasing it and reacquiring it. So, while the
4356   // should_yield() flag is on, let's sleep for a bit to give the
4357   // other thread a chance to wake up. The limit imposed on the number
4358   // of iterations is defensive, to avoid any unforeseen circumstances
4359   // putting us into an infinite loop. Since it's always been this
4360   // (coordinator_yield()) method that was observed to cause the
4361   // problem, we are using a parameter (CMSCoordinatorYieldSleepCount)
4362   // which is by default non-zero. For the other seven methods that
4363   // also perform the yield operation, we are using a different
4364   // parameter (CMSYieldSleepCount), which is by default zero. This way we
4365   // can enable the sleeping for those methods too, if necessary.
4366   // See 6442774.
4367   //
4368   // We really need to reconsider the synchronization between the GC
4369   // thread and the yield-requesting threads in the future and we
4370   // should really use wait/notify, which is the recommended
4371   // way of doing this type of interaction. Additionally, we should
4372   // consolidate the eight methods that do the yield operation, which
4373   // are almost identical, into one for better maintainability and
4374   // readability. See 6445193.
4375   //
4376   // Tony 2006.06.29
4377   for (unsigned i = 0; i < CMSCoordinatorYieldSleepCount &&
4378                    ConcurrentMarkSweepThread::should_yield() &&
4379                    !CMSCollector::foregroundGCIsActive(); ++i) {
4380     os::sleep(Thread::current(), 1, false);
4381     ConcurrentMarkSweepThread::acknowledge_yield_request();
4382   }
4383 
4384   ConcurrentMarkSweepThread::synchronize(true);
4385   _bit_map_lock->lock_without_safepoint_check();
4386   _collector->startTimer();
4387 }
4388 
4389 bool CMSCollector::do_marking_mt(bool asynch) {
4390   assert(ConcGCThreads > 0 && conc_workers() != NULL, "precondition");
4391   int num_workers = AdaptiveSizePolicy::calc_active_conc_workers(
4392                                        conc_workers()->total_workers(),
4393                                        conc_workers()->active_workers(),
4394                                        Threads::number_of_non_daemon_threads());
4395   conc_workers()->set_active_workers(num_workers);
4396 
4397   CompactibleFreeListSpace* cms_space  = _cmsGen->cmsSpace();
4398 
4399   CMSConcMarkingTask tsk(this,
4400                          cms_space,
4401                          asynch,
4402                          conc_workers(),
4403                          task_queues());
4404 
4405   // Since the actual number of workers we get may be different
4406   // from the number we requested above, do we need to do anything different
4407   // below? In particular, maybe we need to subclass the SequentialSubTasksDone
4408   // class?? XXX
4409   cms_space->initialize_sequential_subtasks_for_marking(num_workers);
4410 
4411   // Refs discovery is already non-atomic.
4412   assert(!ref_processor()->discovery_is_atomic(), "Should be non-atomic");
4413   assert(ref_processor()->discovery_is_mt(), "Discovery should be MT");
4414   conc_workers()->start_task(&tsk);
4415   while (tsk.yielded()) {
4416     tsk.coordinator_yield();
4417     conc_workers()->continue_task(&tsk);
4418   }
4419   // If the task was aborted, _restart_addr will be non-NULL
4420   assert(tsk.completed() || _restart_addr != NULL, "Inconsistency");
4421   while (_restart_addr != NULL) {
4422     // XXX For now we do not make use of ABORTED state and have not
4423     // yet implemented the right abort semantics (even in the original
4424     // single-threaded CMS case). That needs some more investigation
4425     // and is deferred for now; see CR# TBF. 07252005YSR. XXX
4426     assert(!CMSAbortSemantics || tsk.aborted(), "Inconsistency");
4427     // If _restart_addr is non-NULL, a marking stack overflow
4428     // occurred; we need to do a fresh marking iteration from the
4429     // indicated restart address.
4430     if (_foregroundGCIsActive && asynch) {
4431       // We may be running into repeated stack overflows, having
4432       // reached the limit of the stack size, while making very
4433       // slow forward progress. It may be best to bail out and
4434       // let the foreground collector do its job.
4435       // Clear _restart_addr, so that foreground GC
4436       // works from scratch. This avoids the headache of
4437       // a "rescan" which would otherwise be needed because
4438       // of the dirty mod union table & card table.
4439       _restart_addr = NULL;
4440       return false;
4441     }
4442     // Adjust the task to restart from _restart_addr
4443     tsk.reset(_restart_addr);
4444     cms_space->initialize_sequential_subtasks_for_marking(num_workers,
4445                   _restart_addr);
4446     _restart_addr = NULL;
4447     // Get the workers going again
4448     conc_workers()->start_task(&tsk);
4449     while (tsk.yielded()) {
4450       tsk.coordinator_yield();
4451       conc_workers()->continue_task(&tsk);
4452     }
4453   }
4454   assert(tsk.completed(), "Inconsistency");
4455   assert(tsk.result() == true, "Inconsistency");
4456   return true;
4457 }
4458 
4459 bool CMSCollector::do_marking_st(bool asynch) {
4460   ResourceMark rm;
4461   HandleMark   hm;
4462 
4463   // Temporarily make refs discovery single threaded (non-MT)
4464   ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false);
4465   MarkFromRootsClosure markFromRootsClosure(this, _span, &_markBitMap,
4466     &_markStack, CMSYield && asynch);
4467   // the last argument to iterate indicates whether the iteration
4468   // should be incremental with periodic yields.
4469   _markBitMap.iterate(&markFromRootsClosure);
4470   // If _restart_addr is non-NULL, a marking stack overflow
4471   // occurred; we need to do a fresh iteration from the
4472   // indicated restart address.
4473   while (_restart_addr != NULL) {
4474     if (_foregroundGCIsActive && asynch) {
4475       // We may be running into repeated stack overflows, having
4476       // reached the limit of the stack size, while making very
4477       // slow forward progress. It may be best to bail out and
4478       // let the foreground collector do its job.
4479       // Clear _restart_addr, so that foreground GC
4480       // works from scratch. This avoids the headache of
4481       // a "rescan" which would otherwise be needed because
4482       // of the dirty mod union table & card table.
4483       _restart_addr = NULL;
4484       return false;  // indicating failure to complete marking
4485     }
4486     // Deal with stack overflow:
4487     // we restart marking from _restart_addr
4488     HeapWord* ra = _restart_addr;
4489     markFromRootsClosure.reset(ra);
4490     _restart_addr = NULL;
4491     _markBitMap.iterate(&markFromRootsClosure, ra, _span.end());
4492   }
4493   return true;
4494 }
4495 
4496 void CMSCollector::preclean() {
4497   check_correct_thread_executing();
4498   assert(Thread::current()->is_ConcurrentGC_thread(), "Wrong thread");
4499   verify_work_stacks_empty();
4500   verify_overflow_empty();
4501   _abort_preclean = false;
4502   if (CMSPrecleaningEnabled) {
4503     if (!CMSEdenChunksRecordAlways) {
4504       _eden_chunk_index = 0;
4505     }
4506     size_t used = get_eden_used();
4507     size_t capacity = get_eden_capacity();
4508     // Don't start sampling unless we will get sufficiently
4509     // many samples.
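    // (The threshold below is 1/CMSScheduleRemarkSamplingRatio of the
    // abort threshold used in sample_eden(), namely
    // capacity/100 * CMSScheduleRemarkEdenPenetration; so we sample only
    // when we can expect to take roughly CMSScheduleRemarkSamplingRatio
    // samples before Eden occupancy crosses that threshold.)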
4510     if (used < (capacity/(CMSScheduleRemarkSamplingRatio * 100)
4511                 * CMSScheduleRemarkEdenPenetration)) {
4512       _start_sampling = true;
4513     } else {
4514       _start_sampling = false;
4515     }
4516     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
4517     CMSPhaseAccounting pa(this, "preclean", !PrintGCDetails);
4518     preclean_work(CMSPrecleanRefLists1, CMSPrecleanSurvivors1);
4519   }
4520   CMSTokenSync x(true); // is cms thread
4521   if (CMSPrecleaningEnabled) {
4522     sample_eden();
4523     _collectorState = AbortablePreclean;
4524   } else {
4525     _collectorState = FinalMarking;
4526   }
4527   verify_work_stacks_empty();
4528   verify_overflow_empty();
4529 }
4530 
4531 // Try and schedule the remark such that young gen
4532 // occupancy is CMSScheduleRemarkEdenPenetration %.
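// For example, assuming (for illustration only) CMSScheduleRemarkEdenPenetration
// is 50: precleaning below keeps going until sample_eden() (further below)
// observes Eden occupancy above 50% of capacity and sets _abort_preclean, or
// until the loop-count or wallclock limits in the loop are reached.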
4533 void CMSCollector::abortable_preclean() {
4534   check_correct_thread_executing();
4535   assert(CMSPrecleaningEnabled,  "Inconsistent control state");
4536   assert(_collectorState == AbortablePreclean, "Inconsistent control state");
4537 
4538   // If Eden's current occupancy is below this threshold,
4539   // immediately schedule the remark; else preclean
4540   // past the next scavenge in an effort to
4541   // schedule the pause as described above. By choosing
4542   // CMSScheduleRemarkEdenSizeThreshold >= max eden size
4543   // we will never do an actual abortable preclean cycle.
4544   if (get_eden_used() > CMSScheduleRemarkEdenSizeThreshold) {
4545     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
4546     CMSPhaseAccounting pa(this, "abortable-preclean", !PrintGCDetails);
4547     // We need more smarts in the abortable preclean
4548     // loop below to deal with cases where allocation
4549     // in young gen is very very slow, and our precleaning
4550     // is running a losing race against a horde of
4551     // mutators intent on flooding us with CMS updates
4552     // (dirty cards).
4553     // One, admittedly dumb, strategy is to give up
4554     // after a certain number of abortable precleaning loops
4555     // or after a certain maximum time. We want to make
4556     // this smarter in the next iteration.
4557     // XXX FIX ME!!! YSR
4558     size_t loops = 0, workdone = 0, cumworkdone = 0, waited = 0;
4559     while (!(should_abort_preclean() ||
4560              ConcurrentMarkSweepThread::should_terminate())) {
4561       workdone = preclean_work(CMSPrecleanRefLists2, CMSPrecleanSurvivors2);
4562       cumworkdone += workdone;
4563       loops++;
4564       // Voluntarily terminate abortable preclean phase if we have
4565       // been at it for too long.
4566       if ((CMSMaxAbortablePrecleanLoops != 0) &&
4567           loops >= CMSMaxAbortablePrecleanLoops) {
4568         if (PrintGCDetails) {
4569           gclog_or_tty->print(" CMS: abort preclean due to loops ");
4570         }
4571         break;
4572       }
4573       if (pa.wallclock_millis() > CMSMaxAbortablePrecleanTime) {
4574         if (PrintGCDetails) {
4575           gclog_or_tty->print(" CMS: abort preclean due to time ");
4576         }
4577         break;
4578       }
4579       // If we are doing little work each iteration, we should
4580       // take a short break.
4581       if (workdone < CMSAbortablePrecleanMinWorkPerIteration) {
4582         // Sleep for some time, waiting for work to accumulate
4583         stopTimer();
4584         cmsThread()->wait_on_cms_lock(CMSAbortablePrecleanWaitMillis);
4585         startTimer();
4586         waited++;
4587       }
4588     }
4589     if (PrintCMSStatistics > 0) {
4590       gclog_or_tty->print(" [%d iterations, %d waits, %d cards)] ",
4591                           loops, waited, cumworkdone);
4592     }
4593   }
4594   CMSTokenSync x(true); // is cms thread
4595   if (_collectorState != Idling) {
4596     assert(_collectorState == AbortablePreclean,
4597            "Spontaneous state transition?");
4598     _collectorState = FinalMarking;
4599   } // Else, a foreground collection completed this CMS cycle.
4600   return;
4601 }
4602 
4603 // Respond to an Eden sampling opportunity
4604 void CMSCollector::sample_eden() {
4605   // Make sure a young gc cannot sneak in between our
4606   // reading and recording of a sample.
4607   assert(Thread::current()->is_ConcurrentGC_thread(),
4608          "Only the cms thread may collect Eden samples");
4609   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
4610          "Should collect samples while holding CMS token");
4611   if (!_start_sampling) {
4612     return;
4613   }
4614   // When CMSEdenChunksRecordAlways is true, the eden chunk array
4615   // is populated by the young generation.
4616   if (_eden_chunk_array != NULL && !CMSEdenChunksRecordAlways) {
4617     if (_eden_chunk_index < _eden_chunk_capacity) {
4618       _eden_chunk_array[_eden_chunk_index] = *_top_addr;   // take sample
4619       assert(_eden_chunk_array[_eden_chunk_index] <= *_end_addr,
4620              "Unexpected state of Eden");
4621       // We'd like to check that what we just sampled is an oop-start address;
4622       // however, we cannot do that here since the object may not yet have been
4623       // initialized. So we'll instead do the check when we _use_ this sample
4624       // later.
4625       if (_eden_chunk_index == 0 ||
4626           (pointer_delta(_eden_chunk_array[_eden_chunk_index],
4627                          _eden_chunk_array[_eden_chunk_index-1])
4628            >= CMSSamplingGrain)) {
4629         _eden_chunk_index++;  // commit sample
4630       }
4631     }
4632   }
4633   if ((_collectorState == AbortablePreclean) && !_abort_preclean) {
4634     size_t used = get_eden_used();
4635     size_t capacity = get_eden_capacity();
4636     assert(used <= capacity, "Unexpected state of Eden");
4637     if (used >  (capacity/100 * CMSScheduleRemarkEdenPenetration)) {
4638       _abort_preclean = true;
4639     }
4640   }
4641 }
4642 
4643 
4644 size_t CMSCollector::preclean_work(bool clean_refs, bool clean_survivor) {
4645   assert(_collectorState == Precleaning ||
4646          _collectorState == AbortablePreclean, "incorrect state");
4647   ResourceMark rm;
4648   HandleMark   hm;
4649 
4650   // Precleaning is currently not MT but the reference processor
4651   // may be set for MT.  Disable it temporarily here.
4652   ReferenceProcessor* rp = ref_processor();
4653   ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(rp, false);
4654 
4655   // Do one pass of scrubbing the discovered reference lists
4656   // to remove any reference objects with strongly-reachable
4657   // referents.
4658   if (clean_refs) {
4659     CMSPrecleanRefsYieldClosure yield_cl(this);
4660     assert(rp->span().equals(_span), "Spans should be equal");
4661     CMSKeepAliveClosure keep_alive(this, _span, &_markBitMap,
4662                                    &_markStack, true /* preclean */);
4663     CMSDrainMarkingStackClosure complete_trace(this,
4664                                    _span, &_markBitMap, &_markStack,
4665                                    &keep_alive, true /* preclean */);
4666 
4667     // We don't want this step to interfere with a young
4668     // collection because we don't want to take CPU
4669     // or memory bandwidth away from the young GC threads
4670     // (which may be as many as there are CPUs).
4671     // Note that we don't need to protect ourselves from
4672     // interference with mutators because they can't
4673     // manipulate the discovered reference lists nor affect
4674     // the computed reachability of the referents, the
4675     // only properties manipulated by the precleaning
4676     // of these reference lists.
4677     stopTimer();
4678     CMSTokenSyncWithLocks x(true /* is cms thread */,
4679                             bitMapLock());
4680     startTimer();
4681     sample_eden();
4682 
4683     // The following will yield to allow foreground
4684     // collection to proceed promptly. XXX YSR:
4685     // The code in this method may need further
4686     // tweaking for better performance and some restructuring
4687     // for cleaner interfaces.
4688     GCTimer *gc_timer = NULL; // Currently not tracing concurrent phases
4689     rp->preclean_discovered_references(
4690           rp->is_alive_non_header(), &keep_alive, &complete_trace, &yield_cl,
4691           gc_timer);
4692   }
4693 
4694   if (clean_survivor) {  // preclean the active survivor space(s)
4695     assert(_young_gen->kind() == Generation::DefNew ||
4696            _young_gen->kind() == Generation::ParNew ||
4697            _young_gen->kind() == Generation::ASParNew,
4698          "incorrect type for cast");
4699     DefNewGeneration* dng = (DefNewGeneration*)_young_gen;
4700     PushAndMarkClosure pam_cl(this, _span, ref_processor(),
4701                              &_markBitMap, &_modUnionTable,
4702                              &_markStack, true /* precleaning phase */);
4703     stopTimer();
4704     CMSTokenSyncWithLocks ts(true /* is cms thread */,
4705                              bitMapLock());
4706     startTimer();
4707     unsigned int before_count =
4708       GenCollectedHeap::heap()->total_collections();
4709     SurvivorSpacePrecleanClosure
4710       sss_cl(this, _span, &_markBitMap, &_markStack,
4711              &pam_cl, before_count, CMSYield);
4712     dng->from()->object_iterate_careful(&sss_cl);
4713     dng->to()->object_iterate_careful(&sss_cl);
4714   }
4715   MarkRefsIntoAndScanClosure
4716     mrias_cl(_span, ref_processor(), &_markBitMap, &_modUnionTable,
4717              &_markStack, this, CMSYield,
4718              true /* precleaning phase */);
4719   // CAUTION: The following closure has persistent state that may need to
4720   // be reset upon a decrease in the sequence of addresses it
4721   // processes.
4722   ScanMarkedObjectsAgainCarefullyClosure
4723     smoac_cl(this, _span,
4724       &_markBitMap, &_markStack, &mrias_cl, CMSYield);
4725 
4726   // Preclean dirty cards in ModUnionTable and CardTable using
4727   // appropriate convergence criterion;
4728   // repeat CMSPrecleanIter times unless we find that
4729   // we are losing.
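  // Concretely, the loop below breaks out early as soon as curNumCards drops
  // to CMSPrecleanThreshold or below, or (after the first pass) as soon as
  // the pass-to-pass ratio curNumCards/lastNumCards exceeds
  // CMSPrecleanNumerator/CMSPrecleanDenominator, i.e. the number of freshly
  // dirtied cards is no longer shrinking fast enough to be worth another pass.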
4730   assert(CMSPrecleanIter < 10, "CMSPrecleanIter is too large");
4731   assert(CMSPrecleanNumerator < CMSPrecleanDenominator,
4732          "Bad convergence multiplier");
4733   assert(CMSPrecleanThreshold >= 100,
4734          "Unreasonably low CMSPrecleanThreshold");
4735 
4736   size_t numIter, cumNumCards, lastNumCards, curNumCards;
4737   for (numIter = 0, cumNumCards = lastNumCards = curNumCards = 0;
4738        numIter < CMSPrecleanIter;
4739        numIter++, lastNumCards = curNumCards, cumNumCards += curNumCards) {
4740     curNumCards  = preclean_mod_union_table(_cmsGen, &smoac_cl);
4741     if (Verbose && PrintGCDetails) {
4742       gclog_or_tty->print(" (modUnionTable: %d cards)", curNumCards);
4743     }
4744     // Either there are very few dirty cards, so re-mark
4745     // pause will be small anyway, or our pre-cleaning isn't
4746     // that much faster than the rate at which cards are being
4747     // dirtied, so we might as well stop and re-mark since
4748     // precleaning won't improve our re-mark time by much.
4749     if (curNumCards <= CMSPrecleanThreshold ||
4750         (numIter > 0 &&
4751          (curNumCards * CMSPrecleanDenominator >
4752          lastNumCards * CMSPrecleanNumerator))) {
4753       numIter++;
4754       cumNumCards += curNumCards;
4755       break;
4756     }
4757   }
4758 
4759   preclean_klasses(&mrias_cl, _cmsGen->freelistLock());
4760 
4761   curNumCards = preclean_card_table(_cmsGen, &smoac_cl);
4762   cumNumCards += curNumCards;
4763   if (PrintGCDetails && PrintCMSStatistics != 0) {
4764     gclog_or_tty->print_cr(" (cardTable: %d cards, re-scanned %d cards, %d iterations)",
4765                   curNumCards, cumNumCards, numIter);
4766   }
4767   return cumNumCards;   // as a measure of useful work done
4768 }
4769 
4770 // PRECLEANING NOTES:
4771 // Precleaning involves:
4772 // . reading the bits of the modUnionTable and clearing the set bits.
4773 // . For the cards corresponding to the set bits, we scan the
4774 //   objects on those cards. This means we need the free_list_lock
4775 //   so that we can safely iterate over the CMS space when scanning
4776 //   for oops.
4777 // . When we scan the objects, we'll be both reading and setting
4778 //   marks in the marking bit map, so we'll need the marking bit map.
4779 // . For protecting _collector_state transitions, we take the CGC_lock.
4780 //   Note that any races in the reading of card table entries by the
4781 //   CMS thread on the one hand and the clearing of those entries by the
4782 //   VM thread or the setting of those entries by the mutator threads on the
4783 //   other are quite benign. However, for efficiency it makes sense to keep
4784 //   the VM thread from racing with the CMS thread while the latter is
4785 //   reading dirty card info from the card table and the modUnionTable. We
4786 //   therefore also use the CGC_lock to protect the reading of the card
4787 //   table and the mod union table by the CMS thread.
4788 // . We run concurrently with mutator updates, so scanning
4789 //   needs to be done carefully  -- we should not try to scan
4790 //   potentially uninitialized objects.
4791 //
4792 // Locking strategy: While holding the CGC_lock, we scan over and
4793 // reset a maximal dirty range of the mod union / card tables, then lock
4794 // the free_list_lock and bitmap lock to do a full marking, then
4795 // release these locks; and repeat the cycle. This allows for a
4796 // certain amount of fairness in the sharing of these locks between
4797 // the CMS collector on the one hand, and the VM thread and the
4798 // mutators on the other.
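//
// In outline, each iteration of the preclean loops below does:
//  . under CMSTokenSync (CGC_lock), read and reset one maximal dirty range of
//    the mod union table (or of the card table, marking it precleaned);
//  . under CMSTokenSyncWithLocks (free_list_lock + bitmap lock), scan the
//    objects on the cards in that range;
// with the CMS timer stopped across each of these potentially-yielding
// acquisitions.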
4799 
4800 // NOTE: preclean_mod_union_table() and preclean_card_table()
4801 // further below are largely identical; if you need to modify
4802 // one of these methods, please check the other method too.
4803 
4804 size_t CMSCollector::preclean_mod_union_table(
4805   ConcurrentMarkSweepGeneration* gen,
4806   ScanMarkedObjectsAgainCarefullyClosure* cl) {
4807   verify_work_stacks_empty();
4808   verify_overflow_empty();
4809 
4810   // strategy: starting with the first card, accumulate contiguous
4811   // ranges of dirty cards; clear these cards, then scan the region
4812   // covered by these cards.
4813 
4814   // Since all of the MUT is committed ahead, we can just use
4815   // that, in case the generations expand while we are precleaning.
4816   // It might also be fine to just use the committed part of the
4817   // generation, but we might potentially miss cards when the
4818   // generation is rapidly expanding while we are in the midst
4819   // of precleaning.
4820   HeapWord* startAddr = gen->reserved().start();
4821   HeapWord* endAddr   = gen->reserved().end();
4822 
4823   cl->setFreelistLock(gen->freelistLock());   // needed for yielding
4824 
4825   size_t numDirtyCards, cumNumDirtyCards;
4826   HeapWord *nextAddr, *lastAddr;
4827   for (cumNumDirtyCards = numDirtyCards = 0,
4828        nextAddr = lastAddr = startAddr;
4829        nextAddr < endAddr;
4830        nextAddr = lastAddr, cumNumDirtyCards += numDirtyCards) {
4831 
4832     ResourceMark rm;
4833     HandleMark   hm;
4834 
4835     MemRegion dirtyRegion;
4836     {
4837       stopTimer();
4838       // Potential yield point
4839       CMSTokenSync ts(true);
4840       startTimer();
4841       sample_eden();
4842       // Get dirty region starting at nextOffset (inclusive),
4843       // simultaneously clearing it.
4844       dirtyRegion =
4845         _modUnionTable.getAndClearMarkedRegion(nextAddr, endAddr);
4846       assert(dirtyRegion.start() >= nextAddr,
4847              "returned region inconsistent?");
4848     }
4849     // Remember where the next search should begin.
4850     // The returned region (if non-empty) is a right open interval,
4851     // so lastOffset is obtained from the right end of that
4852     // interval.
4853     lastAddr = dirtyRegion.end();
4854     // Should do something more transparent and less hacky XXX
4855     numDirtyCards =
4856       _modUnionTable.heapWordDiffToOffsetDiff(dirtyRegion.word_size());
4857 
4858     // We'll scan the cards in the dirty region (with periodic
4859     // yields for foreground GC as needed).
4860     if (!dirtyRegion.is_empty()) {
4861       assert(numDirtyCards > 0, "consistency check");
4862       HeapWord* stop_point = NULL;
4863       stopTimer();
4864       // Potential yield point
4865       CMSTokenSyncWithLocks ts(true, gen->freelistLock(),
4866                                bitMapLock());
4867       startTimer();
4868       {
4869         verify_work_stacks_empty();
4870         verify_overflow_empty();
4871         sample_eden();
4872         stop_point =
4873           gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
4874       }
4875       if (stop_point != NULL) {
4876         // The careful iteration stopped early either because it found an
4877         // uninitialized object, or because we were in the midst of an
4878         // "abortable preclean", which should now be aborted. Redirty
4879         // the bits corresponding to the partially-scanned or unscanned
4880         // cards. We'll either restart at the next block boundary or
4881         // abort the preclean.
4882         assert((_collectorState == AbortablePreclean && should_abort_preclean()),
4883                "Should only be AbortablePreclean.");
4884         _modUnionTable.mark_range(MemRegion(stop_point, dirtyRegion.end()));
4885         if (should_abort_preclean()) {
4886           break; // out of preclean loop
4887         } else {
4888           // Compute the next address at which preclean should pick up;
4889           // might need bitMapLock in order to read P-bits.
4890           lastAddr = next_card_start_after_block(stop_point);
4891         }
4892       }
4893     } else {
4894       assert(lastAddr == endAddr, "consistency check");
4895       assert(numDirtyCards == 0, "consistency check");
4896       break;
4897     }
4898   }
4899   verify_work_stacks_empty();
4900   verify_overflow_empty();
4901   return cumNumDirtyCards;
4902 }
4903 
4904 // NOTE: preclean_mod_union_table() above and preclean_card_table()
4905 // below are largely identical; if you need to modify
4906 // one of these methods, please check the other method too.
4907 
4908 size_t CMSCollector::preclean_card_table(ConcurrentMarkSweepGeneration* gen,
4909   ScanMarkedObjectsAgainCarefullyClosure* cl) {
4910   // strategy: it's similar to preclean_mod_union_table() above, in that
4911   // we accumulate contiguous ranges of dirty cards, mark these cards
4912   // precleaned, then scan the region covered by these cards.
4913   HeapWord* endAddr   = (HeapWord*)(gen->_virtual_space.high());
4914   HeapWord* startAddr = (HeapWord*)(gen->_virtual_space.low());
4915 
4916   cl->setFreelistLock(gen->freelistLock());   // needed for yielding
4917 
4918   size_t numDirtyCards, cumNumDirtyCards;
4919   HeapWord *lastAddr, *nextAddr;
4920 
4921   for (cumNumDirtyCards = numDirtyCards = 0,
4922        nextAddr = lastAddr = startAddr;
4923        nextAddr < endAddr;
4924        nextAddr = lastAddr, cumNumDirtyCards += numDirtyCards) {
4925 
4926     ResourceMark rm;
4927     HandleMark   hm;
4928 
4929     MemRegion dirtyRegion;
4930     {
4931       // See comments in "Precleaning notes" above on why we
4932       // do this locking. XXX Could the locking overheads be
4933       // too high when dirty cards are sparse? [I don't think so.]
4934       stopTimer();
4935       CMSTokenSync x(true); // is cms thread
4936       startTimer();
4937       sample_eden();
4938       // Get and clear dirty region from card table
4939       dirtyRegion = _ct->ct_bs()->dirty_card_range_after_reset(
4940                                     MemRegion(nextAddr, endAddr),
4941                                     true,
4942                                     CardTableModRefBS::precleaned_card_val());
4943 
4944       assert(dirtyRegion.start() >= nextAddr,
4945              "returned region inconsistent?");
4946     }
4947     lastAddr = dirtyRegion.end();
4948     numDirtyCards =
4949       dirtyRegion.word_size()/CardTableModRefBS::card_size_in_words;
4950 
4951     if (!dirtyRegion.is_empty()) {
4952       stopTimer();
4953       CMSTokenSyncWithLocks ts(true, gen->freelistLock(), bitMapLock());
4954       startTimer();
4955       sample_eden();
4956       verify_work_stacks_empty();
4957       verify_overflow_empty();
4958       HeapWord* stop_point =
4959         gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
4960       if (stop_point != NULL) {
4961         assert((_collectorState == AbortablePreclean && should_abort_preclean()),
4962                "Should only be AbortablePreclean.");
4963         _ct->ct_bs()->invalidate(MemRegion(stop_point, dirtyRegion.end()));
4964         if (should_abort_preclean()) {
4965           break; // out of preclean loop
4966         } else {
4967           // Compute the next address at which preclean should pick up.
4968           lastAddr = next_card_start_after_block(stop_point);
4969         }
4970       }
4971     } else {
4972       break;
4973     }
4974   }
4975   verify_work_stacks_empty();
4976   verify_overflow_empty();
4977   return cumNumDirtyCards;
4978 }
4979 
4980 class PrecleanKlassClosure : public KlassClosure {
4981   CMKlassClosure _cm_klass_closure;
4982  public:
4983   PrecleanKlassClosure(OopClosure* oop_closure) : _cm_klass_closure(oop_closure) {}
4984   void do_klass(Klass* k) {
4985     if (k->has_accumulated_modified_oops()) {
4986       k->clear_accumulated_modified_oops();
4987 
4988       _cm_klass_closure.do_klass(k);
4989     }
4990   }
4991 };
4992 
4993 // The freelist lock is needed to prevent asserts, is it really needed?
4994 void CMSCollector::preclean_klasses(MarkRefsIntoAndScanClosure* cl, Mutex* freelistLock) {
4995 
4996   cl->set_freelistLock(freelistLock);
4997 
4998   CMSTokenSyncWithLocks ts(true, freelistLock, bitMapLock());
4999 
5000   // SSS: Add equivalent to ScanMarkedObjectsAgainCarefullyClosure::do_yield_check and should_abort_preclean?
5001   // SSS: We should probably check if precleaning should be aborted, at suitable intervals?
5002   PrecleanKlassClosure preclean_klass_closure(cl);
5003   ClassLoaderDataGraph::classes_do(&preclean_klass_closure);
5004 
5005   verify_work_stacks_empty();
5006   verify_overflow_empty();
5007 }
5008 
5009 void CMSCollector::checkpointRootsFinal(bool asynch,
5010   bool clear_all_soft_refs, bool init_mark_was_synchronous) {
5011   assert(_collectorState == FinalMarking, "incorrect state transition?");
5012   check_correct_thread_executing();
5013   // world is stopped at this checkpoint
5014   assert(SafepointSynchronize::is_at_safepoint(),
5015          "world should be stopped");
5016   TraceCMSMemoryManagerStats tms(_collectorState, GenCollectedHeap::heap()->gc_cause());
5017 
5018   verify_work_stacks_empty();
5019   verify_overflow_empty();
5020 
5021   SpecializationStats::clear();
5022   if (PrintGCDetails) {
5023     gclog_or_tty->print("[YG occupancy: "SIZE_FORMAT" K ("SIZE_FORMAT" K)]",
5024                         _young_gen->used() / K,
5025                         _young_gen->capacity() / K);
5026   }
5027   if (asynch) {
5028     if (CMSScavengeBeforeRemark) {
5029       GenCollectedHeap* gch = GenCollectedHeap::heap();
5030       // Temporarily set flag to false, GCH->do_collection will
5031       // expect it to be false and set to true
5032       FlagSetting fl(gch->_is_gc_active, false);
5033       NOT_PRODUCT(GCTraceTime t("Scavenge-Before-Remark",
5034         PrintGCDetails && Verbose, true, _gc_timer_cm);)
5035       int level = _cmsGen->level() - 1;
5036       if (level >= 0) {
5037         gch->do_collection(true,        // full (i.e. force, see below)
5038                            false,       // !clear_all_soft_refs
5039                            0,           // size
5040                            false,       // is_tlab
5041                            level        // max_level
5042                           );
5043       }
5044     }
5045     FreelistLocker x(this);
5046     MutexLockerEx y(bitMapLock(),
5047                     Mutex::_no_safepoint_check_flag);
5048     assert(!init_mark_was_synchronous, "but that's impossible!");
5049     checkpointRootsFinalWork(asynch, clear_all_soft_refs, false);
5050   } else {
5051     // already have all the locks
5052     checkpointRootsFinalWork(asynch, clear_all_soft_refs,
5053                              init_mark_was_synchronous);
5054   }
5055   verify_work_stacks_empty();
5056   verify_overflow_empty();
5057   SpecializationStats::print();
5058 }
5059 
5060 void CMSCollector::checkpointRootsFinalWork(bool asynch,
5061   bool clear_all_soft_refs, bool init_mark_was_synchronous) {
5062 
5063   NOT_PRODUCT(GCTraceTime tr("checkpointRootsFinalWork", PrintGCDetails, false, _gc_timer_cm);)
5064 
5065   assert(haveFreelistLocks(), "must have free list locks");
5066   assert_lock_strong(bitMapLock());
5067 
5068   if (UseAdaptiveSizePolicy) {
5069     size_policy()->checkpoint_roots_final_begin();
5070   }
5071 
5072   ResourceMark rm;
5073   HandleMark   hm;
5074 
5075   GenCollectedHeap* gch = GenCollectedHeap::heap();
5076 
5077   if (should_unload_classes()) {
5078     CodeCache::gc_prologue();
5079   }
5080   assert(haveFreelistLocks(), "must have free list locks");
5081   assert_lock_strong(bitMapLock());
5082 
5083   if (!init_mark_was_synchronous) {
5084     // We might assume that we need not fill TLAB's when
5085     // CMSScavengeBeforeRemark is set, because we may have just done
5086     // a scavenge which would have filled all TLAB's -- and besides
5087     // Eden would be empty. This however may not always be the case --
5088     // for instance although we asked for a scavenge, it may not have
5089     // happened because of a JNI critical section. We probably need
5090     // a policy for deciding whether we can in that case wait until
5091     // the critical section releases and then do the remark following
5092     // the scavenge, and skip it here. In the absence of that policy,
5093     // or of an indication of whether the scavenge did indeed occur,
5094     // we cannot rely on TLAB's having been filled and must do
5095     // so here just in case a scavenge did not happen.
5096     gch->ensure_parsability(false);  // fill TLAB's, but no need to retire them
5097     // Update the saved marks which may affect the root scans.
5098     gch->save_marks();
5099 
5100     if (CMSPrintEdenSurvivorChunks) {
5101       print_eden_and_survivor_chunk_arrays();
5102     }
5103 
5104     {
5105       COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
5106 
5107       // Note on the role of the mod union table:
5108       // Since the marker in "markFromRoots" marks concurrently with
5109       // mutators, it is possible for some reachable objects not to have been
5110       // scanned. For instance, an only reference to an object A was
5111       // placed in object B after the marker scanned B. Unless B is rescanned,
5112       // A would be collected. Such updates to references in marked objects
5113       // are detected via the mod union table which is the set of all cards
5114       // dirtied since the first checkpoint in this GC cycle and prior to
5115       // the most recent young generation GC, minus those cleaned up by the
5116       // concurrent precleaning.
5117       if (CMSParallelRemarkEnabled && CollectedHeap::use_parallel_gc_threads()) {
5118         GCTraceTime t("Rescan (parallel) ", PrintGCDetails, false, _gc_timer_cm);
5119         do_remark_parallel();
5120       } else {
5121         GCTraceTime t("Rescan (non-parallel) ", PrintGCDetails, false,
5122                     _gc_timer_cm);
5123         do_remark_non_parallel();
5124       }
5125     }
5126   } else {
5127     assert(!asynch, "Can't have init_mark_was_synchronous in asynch mode");
5128     // The initial mark was stop-world, so there's no rescanning to
5129     // do; go straight on to the next step below.
5130   }
5131   verify_work_stacks_empty();
5132   verify_overflow_empty();
5133 
5134   {
5135     NOT_PRODUCT(GCTraceTime ts("refProcessingWork", PrintGCDetails, false, _gc_timer_cm);)
5136     refProcessingWork(asynch, clear_all_soft_refs);
5137   }
5138   verify_work_stacks_empty();
5139   verify_overflow_empty();
5140 
5141   if (should_unload_classes()) {
5142     CodeCache::gc_epilogue();
5143   }
5144   JvmtiExport::gc_epilogue();
5145 
5146   // If we encountered any (marking stack / work queue) overflow
5147   // events during the current CMS cycle, take appropriate
5148   // remedial measures, where possible, so as to try and avoid
5149   // recurrence of that condition.
5150   assert(_markStack.isEmpty(), "No grey objects");
5151   size_t ser_ovflw = _ser_pmc_remark_ovflw + _ser_pmc_preclean_ovflw +
5152                      _ser_kac_ovflw        + _ser_kac_preclean_ovflw;
5153   if (ser_ovflw > 0) {
5154     if (PrintCMSStatistics != 0) {
5155       gclog_or_tty->print_cr("Marking stack overflow (benign) "
5156         "(pmc_pc="SIZE_FORMAT", pmc_rm="SIZE_FORMAT", kac="SIZE_FORMAT
5157         ", kac_preclean="SIZE_FORMAT")",
5158         _ser_pmc_preclean_ovflw, _ser_pmc_remark_ovflw,
5159         _ser_kac_ovflw, _ser_kac_preclean_ovflw);
5160     }
5161     _markStack.expand();
5162     _ser_pmc_remark_ovflw = 0;
5163     _ser_pmc_preclean_ovflw = 0;
5164     _ser_kac_preclean_ovflw = 0;
5165     _ser_kac_ovflw = 0;
5166   }
5167   if (_par_pmc_remark_ovflw > 0 || _par_kac_ovflw > 0) {
5168     if (PrintCMSStatistics != 0) {
5169       gclog_or_tty->print_cr("Work queue overflow (benign) "
5170         "(pmc_rm="SIZE_FORMAT", kac="SIZE_FORMAT")",
5171         _par_pmc_remark_ovflw, _par_kac_ovflw);
5172     }
5173     _par_pmc_remark_ovflw = 0;
5174     _par_kac_ovflw = 0;
5175   }
5176   if (PrintCMSStatistics != 0) {
5177      if (_markStack._hit_limit > 0) {
5178        gclog_or_tty->print_cr(" (benign) Hit max stack size limit ("SIZE_FORMAT")",
5179                               _markStack._hit_limit);
5180      }
5181      if (_markStack._failed_double > 0) {
5182        gclog_or_tty->print_cr(" (benign) Failed stack doubling ("SIZE_FORMAT"),"
5183                               " current capacity "SIZE_FORMAT,
5184                               _markStack._failed_double,
5185                               _markStack.capacity());
5186      }
5187   }
5188   _markStack._hit_limit = 0;
5189   _markStack._failed_double = 0;
5190 
5191   if ((VerifyAfterGC || VerifyDuringGC) &&
5192       GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
5193     verify_after_remark();
5194   }
5195 
5196   _gc_tracer_cm->report_object_count_after_gc(&_is_alive_closure);
5197 
5198   // Change under the freelistLocks.
5199   _collectorState = Sweeping;
5200   // Call isAllClear() under bitMapLock
5201   assert(_modUnionTable.isAllClear(),
5202       "Should be clear by end of the final marking");
5203   assert(_ct->klass_rem_set()->mod_union_is_clear(),
5204       "Should be clear by end of the final marking");
5205   if (UseAdaptiveSizePolicy) {
5206     size_policy()->checkpoint_roots_final_end(gch->gc_cause());
5207   }
5208 }
5209 
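     // Work method for the parallel initial mark task: each worker first
     // scans its portion of the young gen (partitioned via the eden and
     // survivor chunk arrays) and then processes the remaining strong roots,
     // marking the objects found into the CMS mark bit map.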
5210 void CMSParInitialMarkTask::work(uint worker_id) {
5211   elapsedTimer _timer;
5212   ResourceMark rm;
5213   HandleMark   hm;
5214 
5215   // ---------- scan from roots --------------
5216   _timer.start();
5217   GenCollectedHeap* gch = GenCollectedHeap::heap();
5218   Par_MarkRefsIntoClosure par_mri_cl(_collector->_span, &(_collector->_markBitMap));
5219   CMKlassClosure klass_closure(&par_mri_cl);
5220 
5221   // ---------- young gen roots --------------
5222   {
5223     work_on_young_gen_roots(worker_id, &par_mri_cl);
5224     _timer.stop();
5225     if (PrintCMSStatistics != 0) {
5226       gclog_or_tty->print_cr(
5227         "Finished young gen initial mark scan work in %dth thread: %3.3f sec",
5228         worker_id, _timer.seconds());
5229     }
5230   }
5231 
5232   // ---------- remaining roots --------------
5233   _timer.reset();
5234   _timer.start();
5235   gch->gen_process_strong_roots(_collector->_cmsGen->level(),
5236                                 false,     // yg was scanned above
5237                                 false,     // this is parallel code
5238                                 false,     // not scavenging
5239                                 SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
5240                                 &par_mri_cl,
5241                                 true,   // walk all of code cache if (so & SO_CodeCache)
5242                                 NULL,
5243                                 &klass_closure);
5244   assert(_collector->should_unload_classes()
5245          || (_collector->CMSCollector::roots_scanning_options() & SharedHeap::SO_CodeCache),
5246          "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
5247   _timer.stop();
5248   if (PrintCMSStatistics != 0) {
5249     gclog_or_tty->print_cr(
5250       "Finished remaining root initial mark scan work in %dth thread: %3.3f sec",
5251       worker_id, _timer.seconds());
5252   }
5253 }
5254 
5255 // Parallel remark task
5256 class CMSParRemarkTask: public CMSParMarkTask {
5257   CompactibleFreeListSpace* _cms_space;
5258 
5259   // The per-thread work queues, available here for stealing.
5260   OopTaskQueueSet*       _task_queues;
5261   ParallelTaskTerminator _term;
5262 
5263  public:
5264   // A value of 0 passed to n_workers will cause the number of
5265   // workers to be taken from the active workers in the work gang.
5266   CMSParRemarkTask(CMSCollector* collector,
5267                    CompactibleFreeListSpace* cms_space,
5268                    int n_workers, FlexibleWorkGang* workers,
5269                    OopTaskQueueSet* task_queues):
5270     CMSParMarkTask("Rescan roots and grey objects in parallel",
5271                    collector, n_workers),
5272     _cms_space(cms_space),
5273     _task_queues(task_queues),
5274     _term(n_workers, task_queues) { }
5275 
5276   OopTaskQueueSet* task_queues() { return _task_queues; }
5277 
5278   OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
5279 
5280   ParallelTaskTerminator* terminator() { return &_term; }
5281   int n_workers() { return _n_workers; }
5282 
5283   void work(uint worker_id);
5284 
5285  private:
5286   // ... of dirty cards in old space
5287   void do_dirty_card_rescan_tasks(CompactibleFreeListSpace* sp, int i,
5288                                   Par_MarkRefsIntoAndScanClosure* cl);
5289 
5290   // ... work stealing for the above
5291   void do_work_steal(int i, Par_MarkRefsIntoAndScanClosure* cl, int* seed);
5292 };
5293 
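     // Closure used at remark time to rescan klasses: a klass is passed on to
     // the embedded CMKlassClosure only if its oops were modified during the
     // concurrent marking phase, as indicated by the accumulated (or current)
     // modified-oops flags.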
5294 class RemarkKlassClosure : public KlassClosure {
5295   CMKlassClosure _cm_klass_closure;
5296  public:
5297   RemarkKlassClosure(OopClosure* oop_closure) : _cm_klass_closure(oop_closure) {}
5298   void do_klass(Klass* k) {
5299     // Check if we have modified any oops in the Klass during the concurrent marking.
5300     if (k->has_accumulated_modified_oops()) {
5301       k->clear_accumulated_modified_oops();
5302 
5303       // We could have transferred the current modified marks to the accumulated marks,
5304       // like we do with the Card Table to Mod Union Table. But it's not really necessary.
5305     } else if (k->has_modified_oops()) {
5306       // Don't clear anything, this info is needed by the next young collection.
5307     } else {
5308       // No modified oops in the Klass.
5309       return;
5310     }
5311 
5312     // The klass has modified fields, need to scan the klass.
5313     _cm_klass_closure.do_klass(k);
5314   }
5315 };
5316 
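     // Rescan the young gen spaces (to, from and eden, in that order) with
     // the given closure, using the survivor and eden chunk arrays sampled
     // earlier to partition each space into parallel sub-tasks.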
5317 void CMSParMarkTask::work_on_young_gen_roots(uint worker_id, OopsInGenClosure* cl) {
5318   DefNewGeneration* dng = _collector->_young_gen->as_DefNewGeneration();
5319   EdenSpace* eden_space = dng->eden();
5320   ContiguousSpace* from_space = dng->from();
5321   ContiguousSpace* to_space   = dng->to();
5322 
5323   HeapWord** eca = _collector->_eden_chunk_array;
5324   size_t     ect = _collector->_eden_chunk_index;
5325   HeapWord** sca = _collector->_survivor_chunk_array;
5326   size_t     sct = _collector->_survivor_chunk_index;
5327 
5328   assert(ect <= _collector->_eden_chunk_capacity, "out of bounds");
5329   assert(sct <= _collector->_survivor_chunk_capacity, "out of bounds");
5330 
5331   do_young_space_rescan(worker_id, cl, to_space, NULL, 0);
5332   do_young_space_rescan(worker_id, cl, from_space, sca, sct);
5333   do_young_space_rescan(worker_id, cl, eden_space, eca, ect);
5334 }
5335 
5336 // work_queue(worker_id) is passed to the closure
5337 // Par_MarkRefsIntoAndScanClosure.  The worker_id parameter is
5338 // also passed to do_dirty_card_rescan_tasks() and to
5339 // do_work_steal() to select the corresponding task_queue.
5340 
5341 void CMSParRemarkTask::work(uint worker_id) {
5342   elapsedTimer _timer;
5343   ResourceMark rm;
5344   HandleMark   hm;
5345 
5346   // ---------- rescan from roots --------------
5347   _timer.start();
5348   GenCollectedHeap* gch = GenCollectedHeap::heap();
5349   Par_MarkRefsIntoAndScanClosure par_mrias_cl(_collector,
5350     _collector->_span, _collector->ref_processor(),
5351     &(_collector->_markBitMap),
5352     work_queue(worker_id));
5353 
5354   // Rescan young gen roots first since these are likely
5355   // coarsely partitioned and may, on that account, constitute
5356   // the critical path; thus, it's best to start off that
5357   // work first.
5358   // ---------- young gen roots --------------
5359   {
5360     work_on_young_gen_roots(worker_id, &par_mrias_cl);
5361     _timer.stop();
5362     if (PrintCMSStatistics != 0) {
5363       gclog_or_tty->print_cr(
5364         "Finished young gen rescan work in %dth thread: %3.3f sec",
5365         worker_id, _timer.seconds());
5366     }
5367   }
5368 
5369   // ---------- remaining roots --------------
5370   _timer.reset();
5371   _timer.start();
5372   gch->gen_process_strong_roots(_collector->_cmsGen->level(),
5373                                 false,     // yg was scanned above
5374                                 false,     // this is parallel code
5375                                 false,     // not scavenging
5376                                 SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
5377                                 &par_mrias_cl,
5378                                 true,   // walk all of code cache if (so & SO_CodeCache)
5379                                 NULL,
5380                                 NULL);     // The dirty klasses will be handled below
5381   assert(_collector->should_unload_classes()
5382          || (_collector->CMSCollector::roots_scanning_options() & SharedHeap::SO_CodeCache),
5383          "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
5384   _timer.stop();
5385   if (PrintCMSStatistics != 0) {
5386     gclog_or_tty->print_cr(
5387       "Finished remaining root rescan work in %dth thread: %3.3f sec",
5388       worker_id, _timer.seconds());
5389   }
5390 
5391   // ---------- unhandled CLD scanning ----------
5392   if (worker_id == 0) { // Single threaded at the moment.
5393     _timer.reset();
5394     _timer.start();
5395 
5396     // Scan all new class loader data objects and new dependencies that were
5397     // introduced during concurrent marking.
5398     ResourceMark rm;
5399     GrowableArray<ClassLoaderData*>* array = ClassLoaderDataGraph::new_clds();
5400     for (int i = 0; i < array->length(); i++) {
5401       par_mrias_cl.do_class_loader_data(array->at(i));
5402     }
5403 
5404     // We don't need to keep track of new CLDs anymore.
5405     ClassLoaderDataGraph::remember_new_clds(false);
5406 
5407     _timer.stop();
5408     if (PrintCMSStatistics != 0) {
5409       gclog_or_tty->print_cr(
5410           "Finished unhandled CLD scanning work in %dth thread: %3.3f sec",
5411           worker_id, _timer.seconds());
5412     }
5413   }
5414 
5415   // ---------- dirty klass scanning ----------
5416   if (worker_id == 0) { // Single threaded at the moment.
5417     _timer.reset();
5418     _timer.start();
5419 
5420     // Scan all classes that were dirtied during the concurrent marking phase.
5421     RemarkKlassClosure remark_klass_closure(&par_mrias_cl);
5422     ClassLoaderDataGraph::classes_do(&remark_klass_closure);
5423 
5424     _timer.stop();
5425     if (PrintCMSStatistics != 0) {
5426       gclog_or_tty->print_cr(
5427           "Finished dirty klass scanning work in %dth thread: %3.3f sec",
5428           worker_id, _timer.seconds());
5429     }
5430   }
5431 
5432   // We might have added oops to ClassLoaderData::_handles during the
5433   // concurrent marking phase. These oops point to newly allocated objects
5434   // that are guaranteed to be kept alive. Either by the direct allocation
5435   // code, or when the young collector processes the strong roots. Hence,
5436   // we don't have to revisit the _handles block during the remark phase.
5437 
5438   // ---------- rescan dirty cards ------------
5439   _timer.reset();
5440   _timer.start();
5441 
5442   // Do the rescan tasks for each of the two spaces
5443   // (cms_space) in turn.
5444   // "worker_id" is passed to select the task_queue for "worker_id"
5445   do_dirty_card_rescan_tasks(_cms_space, worker_id, &par_mrias_cl);
5446   _timer.stop();
5447   if (PrintCMSStatistics != 0) {
5448     gclog_or_tty->print_cr(
5449       "Finished dirty card rescan work in %dth thread: %3.3f sec",
5450       worker_id, _timer.seconds());
5451   }
5452 
5453   // ---------- steal work from other threads ...
5454   // ---------- ... and drain overflow list.
5455   _timer.reset();
5456   _timer.start();
5457   do_work_steal(worker_id, &par_mrias_cl, _collector->hash_seed(worker_id));
5458   _timer.stop();
5459   if (PrintCMSStatistics != 0) {
5460     gclog_or_tty->print_cr(
5461       "Finished work stealing in %dth thread: %3.3f sec",
5462       worker_id, _timer.seconds());
5463   }
5464 }
5465 
5466 // Note that parameter "worker_id" is not used.
5467 void
5468 CMSParMarkTask::do_young_space_rescan(uint worker_id,
5469   OopsInGenClosure* cl, ContiguousSpace* space,
5470   HeapWord** chunk_array, size_t chunk_top) {
5471   // Until all tasks completed:
5472   // . claim an unclaimed task
5473   // . compute region boundaries corresponding to task claimed
5474   //   using chunk_array
5475   // . par_oop_iterate(cl) over that region
5476 
5477   ResourceMark rm;
5478   HandleMark   hm;
5479 
5480   SequentialSubTasksDone* pst = space->par_seq_tasks();
5481   assert(pst->valid(), "Uninitialized use?");
5482 
5483   uint nth_task = 0;
5484   uint n_tasks  = pst->n_tasks();
5485 
5486   HeapWord *start, *end;
5487   while (!pst->is_task_claimed(/* reference */ nth_task)) {
5488     // We claimed task # nth_task; compute its boundaries.
5489     if (chunk_top == 0) {  // no samples were taken
5490       assert(nth_task == 0 && n_tasks == 1, "Can have only 1 EdenSpace task");
5491       start = space->bottom();
5492       end   = space->top();
5493     } else if (nth_task == 0) {
5494       start = space->bottom();
5495       end   = chunk_array[nth_task];
5496     } else if (nth_task < (uint)chunk_top) {
5497       assert(nth_task >= 1, "Control point invariant");
5498       start = chunk_array[nth_task - 1];
5499       end   = chunk_array[nth_task];
5500     } else {
5501       assert(nth_task == (uint)chunk_top, "Control point invariant");
5502       start = chunk_array[chunk_top - 1];
5503       end   = space->top();
5504     }
5505     MemRegion mr(start, end);
5506     // Verify that mr is in space
5507     assert(mr.is_empty() || space->used_region().contains(mr),
5508            "Should be in space");
5509     // Verify that "start" is an object boundary
5510     assert(mr.is_empty() || oop(mr.start())->is_oop(),
5511            "Should be an oop");
5512     space->par_oop_iterate(mr, cl);
5513   }
5514   pst->all_tasks_completed();
5515 }
5516 
5517 void
5518 CMSParRemarkTask::do_dirty_card_rescan_tasks(
5519   CompactibleFreeListSpace* sp, int i,
5520   Par_MarkRefsIntoAndScanClosure* cl) {
5521   // Until all tasks completed:
5522   // . claim an unclaimed task
5523   // . compute region boundaries corresponding to task claimed
5524   // . transfer dirty bits ct->mut for that region
5525   // . apply rescanclosure to dirty mut bits for that region
5526 
5527   ResourceMark rm;
5528   HandleMark   hm;
5529 
5530   OopTaskQueue* work_q = work_queue(i);
5531   ModUnionClosure modUnionClosure(&(_collector->_modUnionTable));
5532   // CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION!
5533   // CAUTION: This closure has state that persists across calls to
5534   // the work method dirty_range_iterate_clear() in that it has
5535   // embedded in it a (subtype of) UpwardsObjectClosure. The
5536   // use of that state in the embedded UpwardsObjectClosure instance
5537   // assumes that the cards are always iterated (even if in parallel
5538   // by several threads) in monotonically increasing order per each
5539   // thread. This is true of the implementation below which picks
5540   // card ranges (chunks) in monotonically increasing order globally
5541   // and, a fortiori, in monotonically increasing order per thread
5542   // (the latter order being a subsequence of the former).
5543   // If the work code below is ever reorganized into a more chaotic
5544   // work-partitioning form than the current "sequential tasks"
5545   // paradigm, the use of that persistent state will have to be
5546   // revisited and modified appropriately. See also related
5547   // bug 4756801 work on which should examine this code to make
5548   // sure that the changes there do not run counter to the
5549   // assumptions made here and necessary for correctness and
5550   // efficiency. Note also that this code might yield inefficient
5551   // behaviour in the case of very large objects that span one or
5552   // more work chunks. Such objects would potentially be scanned
5553   // several times redundantly. Work on 4756801 should try and
5554   // address that performance anomaly if at all possible. XXX
5555   MemRegion  full_span  = _collector->_span;
5556   CMSBitMap* bm    = &(_collector->_markBitMap);     // shared
5557   MarkFromDirtyCardsClosure
5558     greyRescanClosure(_collector, full_span, // entire span of interest
5559                       sp, bm, work_q, cl);
5560 
5561   SequentialSubTasksDone* pst = sp->conc_par_seq_tasks();
5562   assert(pst->valid(), "Uninitialized use?");
5563   uint nth_task = 0;
5564   const int alignment = CardTableModRefBS::card_size * BitsPerWord;
5565   MemRegion span = sp->used_region();
5566   HeapWord* start_addr = span.start();
5567   HeapWord* end_addr = (HeapWord*)round_to((intptr_t)span.end(),
5568                                            alignment);
5569   const size_t chunk_size = sp->rescan_task_size(); // in HeapWord units
5570   assert((HeapWord*)round_to((intptr_t)start_addr, alignment) ==
5571          start_addr, "Check alignment");
5572   assert((size_t)round_to((intptr_t)chunk_size, alignment) ==
5573          chunk_size, "Check alignment");
5574 
5575   while (!pst->is_task_claimed(/* reference */ nth_task)) {
5576     // Having claimed the nth_task, compute corresponding mem-region,
5577     // which is a fortiori aligned correctly (i.e. at a MUT boundary).
5578     // The alignment restriction ensures that we do not need any
5579     // synchronization with other gang-workers while setting or
5580     // clearing bits in this chunk of the MUT.
5581     MemRegion this_span = MemRegion(start_addr + nth_task*chunk_size,
5582                                     start_addr + (nth_task+1)*chunk_size);
5583     // The last chunk's end might be way beyond the end of the
5584     // used region. In that case pull back appropriately.
5585     if (this_span.end() > end_addr) {
5586       this_span.set_end(end_addr);
5587       assert(!this_span.is_empty(), "Program logic (calculation of n_tasks)");
5588     }
5589     // Iterate over the dirty cards covering this chunk, marking them
5590     // precleaned, and setting the corresponding bits in the mod union
5591     // table. Since we have been careful to partition at Card and MUT-word
5592     // boundaries no synchronization is needed between parallel threads.
5593     _collector->_ct->ct_bs()->dirty_card_iterate(this_span,
5594                                                  &modUnionClosure);
5595 
5596     // Having transferred these marks into the modUnionTable,
5597     // rescan the marked objects on the dirty cards in the modUnionTable.
5598     // Even if this is at a synchronous collection, the initial marking
5599     // may have been done during an asynchronous collection so there
5600     // may be dirty bits in the mod-union table.
5601     _collector->_modUnionTable.dirty_range_iterate_clear(
5602                   this_span, &greyRescanClosure);
5603     _collector->_modUnionTable.verifyNoOneBitsInRange(
5604                                  this_span.start(),
5605                                  this_span.end());
5606   }
5607   pst->all_tasks_completed();  // declare that I am done
5608 }
5609 
5610 // . see if we can share work_queues with ParNew? XXX
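     // Work-stealing loop for the parallel remark: each worker first drains
     // its own queue, then tries to replenish it from the global overflow
     // list, and only then attempts to steal from other workers' queues,
     // terminating via the parallel task terminator when no work remains.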
5611 void
5612 CMSParRemarkTask::do_work_steal(int i, Par_MarkRefsIntoAndScanClosure* cl,
5613                                 int* seed) {
5614   OopTaskQueue* work_q = work_queue(i);
5615   NOT_PRODUCT(int num_steals = 0;)
5616   oop obj_to_scan;
5617   CMSBitMap* bm = &(_collector->_markBitMap);
5618 
5619   while (true) {
5620     // Completely finish any left over work from (an) earlier round(s)
5621     cl->trim_queue(0);
5622     size_t num_from_overflow_list = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
5623                                          (size_t)ParGCDesiredObjsFromOverflowList);
5624     // Now check if there's any work in the overflow list
5625     // Passing ParallelGCThreads as the third parameter, no_of_gc_threads,
5626     // only affects the number of attempts made to get work from the
5627     // overflow list and does not affect the number of workers.  Just
5628     // pass ParallelGCThreads so this behavior is unchanged.
5629     if (_collector->par_take_from_overflow_list(num_from_overflow_list,
5630                                                 work_q,
5631                                                 ParallelGCThreads)) {
5632       // found something in global overflow list;
5633       // not yet ready to go stealing work from others.
5634       // We'd like to assert(work_q->size() != 0, ...)
5635       // because we just took work from the overflow list,
5636       // but of course we can't since all of that could have
5637       // been already stolen from us.
5638       // "He giveth and He taketh away."
5639       continue;
5640     }
5641     // Verify that we have no work before we resort to stealing
5642     assert(work_q->size() == 0, "Have work, shouldn't steal");
5643     // Try to steal from other queues that have work
5644     if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
5645       NOT_PRODUCT(num_steals++;)
5646       assert(obj_to_scan->is_oop(), "Oops, not an oop!");
5647       assert(bm->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?");
5648       // Do scanning work
5649       obj_to_scan->oop_iterate(cl);
5650       // Loop around, finish this work, and try to steal some more
5651     } else if (terminator()->offer_termination()) {
5652         break;  // nirvana from the infinite cycle
5653     }
5654   }
5655   NOT_PRODUCT(
5656     if (PrintCMSStatistics != 0) {
5657       gclog_or_tty->print("\n\t(%d: stole %d oops)", i, num_steals);
5658     }
5659   )
5660   assert(work_q->size() == 0 && _collector->overflow_list_is_empty(),
5661          "Else our work is not yet done");
5662 }
5663 
5664 // If CMSEdenChunksRecordAlways is true, record object boundaries in
5665 // _eden_chunk_array by sampling the eden top in the slow-path eden
5666 // object allocation code path.
5667 // If CMSEdenChunksRecordAlways is false, we rely instead on the
5668 // asynchronous sampling done by sample_eden(), which is active
5669 // during part of the preclean phase.
5670 void CMSCollector::sample_eden_chunk() {
5671   if (CMSEdenChunksRecordAlways && _eden_chunk_array != NULL) {
5672     if (_eden_chunk_lock->try_lock()) {
5673       // Record a sample. This is the critical section. The contents
5674       // of the _eden_chunk_array have to be non-decreasing in the
5675       // address order.
5676       _eden_chunk_array[_eden_chunk_index] = *_top_addr;
5677       assert(_eden_chunk_array[_eden_chunk_index] <= *_end_addr,
5678              "Unexpected state of Eden");
5679       if (_eden_chunk_index == 0 ||
5680           ((_eden_chunk_array[_eden_chunk_index] > _eden_chunk_array[_eden_chunk_index-1]) &&
5681            (pointer_delta(_eden_chunk_array[_eden_chunk_index],
5682                           _eden_chunk_array[_eden_chunk_index-1]) >= CMSSamplingGrain))) {
5683         _eden_chunk_index++;  // commit sample
5684       }
5685       _eden_chunk_lock->unlock();
5686     }
5687   }
5688 }
5689 
5690 // Return a thread-local PLAB recording array, as appropriate.
5691 void* CMSCollector::get_data_recorder(int thr_num) {
5692   if (_survivor_plab_array != NULL &&
5693       (CMSPLABRecordAlways ||
5694        (_collectorState > Marking && _collectorState < FinalMarking))) {
5695     assert(thr_num < (int)ParallelGCThreads, "thr_num is out of bounds");
5696     ChunkArray* ca = &_survivor_plab_array[thr_num];
5697     ca->reset();   // clear it so that fresh data is recorded
5698     return (void*) ca;
5699   } else {
5700     return NULL;
5701   }
5702 }
5703 
5704 // Reset all the thread-local PLAB recording arrays
5705 void CMSCollector::reset_survivor_plab_arrays() {
5706   for (uint i = 0; i < ParallelGCThreads; i++) {
5707     _survivor_plab_array[i].reset();
5708   }
5709 }
5710 
5711 // Merge the per-thread plab arrays into the global survivor chunk
5712 // array which will provide the partitioning of the survivor space
5713 // for CMS initial scan and rescan.
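     // The loop below is, in effect, an n-way merge of the per-thread PLAB
     // arrays (each of which is expected to record addresses in increasing
     // order): on every round the smallest not-yet-consumed address across
     // all threads is appended to _survivor_chunk_array.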
5714 void CMSCollector::merge_survivor_plab_arrays(ContiguousSpace* surv,
5715                                               int no_of_gc_threads) {
5716   assert(_survivor_plab_array  != NULL, "Error");
5717   assert(_survivor_chunk_array != NULL, "Error");
5718   assert(_collectorState == FinalMarking ||
5719          (CMSParallelInitialMarkEnabled && _collectorState == InitialMarking), "Error");
5720   for (int j = 0; j < no_of_gc_threads; j++) {
5721     _cursor[j] = 0;
5722   }
5723   HeapWord* top = surv->top();
5724   size_t i;
5725   for (i = 0; i < _survivor_chunk_capacity; i++) {  // all sca entries
5726     HeapWord* min_val = top;          // Higher than any PLAB address
5727     uint      min_tid = 0;            // position of min_val this round
5728     for (int j = 0; j < no_of_gc_threads; j++) {
5729       ChunkArray* cur_sca = &_survivor_plab_array[j];
5730       if (_cursor[j] == cur_sca->end()) {
5731         continue;
5732       }
5733       assert(_cursor[j] < cur_sca->end(), "ctl pt invariant");
5734       HeapWord* cur_val = cur_sca->nth(_cursor[j]);
5735       assert(surv->used_region().contains(cur_val), "Out of bounds value");
5736       if (cur_val < min_val) {
5737         min_tid = j;
5738         min_val = cur_val;
5739       } else {
5740         assert(cur_val < top, "All recorded addresses should be less");
5741       }
5742     }
5743     // At this point min_val and min_tid are respectively
5744     // the least address among _survivor_plab_array[j].nth(_cursor[j])
5745     // over all threads j, and the thread (min_tid) that recorded that
5746     // address. We record this address in _survivor_chunk_array[i]
5747     // and increment _cursor[min_tid] prior to the next round i.
5748     if (min_val == top) {
5749       break;
5750     }
5751     _survivor_chunk_array[i] = min_val;
5752     _cursor[min_tid]++;
5753   }
5754   // We are all done; record the size of the _survivor_chunk_array
5755   _survivor_chunk_index = i; // exclusive: [0, i)
5756   if (PrintCMSStatistics > 0) {
5757     gclog_or_tty->print(" (Survivor:" SIZE_FORMAT "chunks) ", i);
5758   }
5759   // Verify that we used up all the recorded entries
5760   #ifdef ASSERT
5761     size_t total = 0;
5762     for (int j = 0; j < no_of_gc_threads; j++) {
5763       assert(_cursor[j] == _survivor_plab_array[j].end(), "Ctl pt invariant");
5764       total += _cursor[j];
5765     }
5766     assert(total == _survivor_chunk_index, "Ctl Pt Invariant");
5767     // Check that the merged array is in sorted order
5768     if (total > 0) {
5769       for (size_t i = 0; i < total - 1; i++) {
5770         if (PrintCMSStatistics > 0) {
5771           gclog_or_tty->print(" (chunk" SIZE_FORMAT ":" INTPTR_FORMAT ") ",
5772                               i, _survivor_chunk_array[i]);
5773         }
5774         assert(_survivor_chunk_array[i] < _survivor_chunk_array[i+1],
5775                "Not sorted");
5776       }
5777     }
5778   #endif // ASSERT
5779 }
5780 
5781 // Set up the space's par_seq_tasks structure for work claiming
5782 // for parallel initial scan and rescan of young gen.
5783 // See ParRescanTask where this is currently used.
5784 void
5785 CMSCollector::
5786 initialize_sequential_subtasks_for_young_gen_rescan(int n_threads) {
5787   assert(n_threads > 0, "Unexpected n_threads argument");
5788   DefNewGeneration* dng = (DefNewGeneration*)_young_gen;
5789 
5790   // Eden space
5791   {
5792     SequentialSubTasksDone* pst = dng->eden()->par_seq_tasks();
5793     assert(!pst->valid(), "Clobbering existing data?");
5794     // Each valid entry in [0, _eden_chunk_index) represents a task.
5795     size_t n_tasks = _eden_chunk_index + 1;
5796     assert(n_tasks == 1 || _eden_chunk_array != NULL, "Error");
5797     // Sets the condition for completion of the subtask (how many threads
5798     // need to finish in order to be done).
5799     pst->set_n_threads(n_threads);
5800     pst->set_n_tasks((int)n_tasks);
5801   }
5802 
5803   // Merge the survivor plab arrays into _survivor_chunk_array
5804   if (_survivor_plab_array != NULL) {
5805     merge_survivor_plab_arrays(dng->from(), n_threads);
5806   } else {
5807     assert(_survivor_chunk_index == 0, "Error");
5808   }
5809 
5810   // To space
5811   {
5812     SequentialSubTasksDone* pst = dng->to()->par_seq_tasks();
5813     assert(!pst->valid(), "Clobbering existing data?");
5814     // Sets the condition for completion of the subtask (how many threads
5815     // need to finish in order to be done).
5816     pst->set_n_threads(n_threads);
5817     pst->set_n_tasks(1);
5818     assert(pst->valid(), "Error");
5819   }
5820 
5821   // From space
5822   {
5823     SequentialSubTasksDone* pst = dng->from()->par_seq_tasks();
5824     assert(!pst->valid(), "Clobbering existing data?");
5825     size_t n_tasks = _survivor_chunk_index + 1;
5826     assert(n_tasks == 1 || _survivor_chunk_array != NULL, "Error");
5827     // Sets the condition for completion of the subtask (how many threads
5828     // need to finish in order to be done).
5829     pst->set_n_threads(n_threads);
5830     pst->set_n_tasks((int)n_tasks);
5831     assert(pst->valid(), "Error");
5832   }
5833 }
5834 
5835 // Parallel version of remark
5836 void CMSCollector::do_remark_parallel() {
5837   GenCollectedHeap* gch = GenCollectedHeap::heap();
5838   FlexibleWorkGang* workers = gch->workers();
5839   assert(workers != NULL, "Need parallel worker threads.");
5840   // Choose to use the number of GC workers most recently set
5841   // into "active_workers".  If active_workers is not set, set it
5842   // to ParallelGCThreads.
5843   int n_workers = workers->active_workers();
5844   if (n_workers == 0) {
5845     assert(n_workers > 0, "Should have been set during scavenge");
5846     n_workers = ParallelGCThreads;
5847     workers->set_active_workers(n_workers);
5848   }
5849   CompactibleFreeListSpace* cms_space  = _cmsGen->cmsSpace();
5850 
5851   CMSParRemarkTask tsk(this,
5852     cms_space,
5853     n_workers, workers, task_queues());
5854 
5855   // Set up for parallel process_strong_roots work.
5856   gch->set_par_threads(n_workers);
5857   // We won't be iterating over the cards in the card table updating
5858   // the younger_gen cards, so we shouldn't call the following; otherwise
5859   // the verification code, as well as subsequent younger_refs_iterate
5860   // code, would get confused. XXX
5861   // gch->rem_set()->prepare_for_younger_refs_iterate(true); // parallel
5862 
5863   // The young gen rescan work will not be done as part of
5864   // process_strong_roots (which currently doesn't know how to
5865   // parallelize such a scan), but rather will be broken up into
5866   // a set of parallel tasks (via the sampling that the [abortable]
5867   // preclean phase did of EdenSpace, plus the [two] tasks of
5868   // scanning the [two] survivor spaces). Further fine-grain
5869   // parallelization of the scanning of the survivor spaces
5870   // themselves, and of precleaning of the younger gen itself
5871   // is deferred to the future.
5872   initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
5873 
5874   // The dirty card rescan work is broken up into a "sequence"
5875   // of parallel tasks (per constituent space) that are dynamically
5876   // claimed by the parallel threads.
5877   cms_space->initialize_sequential_subtasks_for_rescan(n_workers);
5878 
5879   // It turns out that even when we're using 1 thread, doing the work in a
5880   // separate thread causes wide variance in run times.  We can't help this
5881   // in the multi-threaded case, but we special-case n=1 here to get
5882   // repeatable measurements of the 1-thread overhead of the parallel code.
5883   if (n_workers > 1) {
5884     // Make refs discovery MT-safe, if it isn't already: it may not
5885     // necessarily be so, since it's possible that we are doing
5886     // ST marking.
5887     ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), true);
5888     GenCollectedHeap::StrongRootsScope srs(gch);
5889     workers->run_task(&tsk);
5890   } else {
5891     ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false);
5892     GenCollectedHeap::StrongRootsScope srs(gch);
5893     tsk.work(0);
5894   }
5895 
5896   gch->set_par_threads(0);  // 0 ==> non-parallel.
5897   // restore, single-threaded for now, any preserved marks
5898   // as a result of work_q overflow
5899   restore_preserved_marks_if_any();
5900 }
5901 
5902 // Non-parallel version of remark
5903 void CMSCollector::do_remark_non_parallel() {
5904   ResourceMark rm;
5905   HandleMark   hm;
5906   GenCollectedHeap* gch = GenCollectedHeap::heap();
5907   ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false);
5908 
5909   MarkRefsIntoAndScanClosure
5910     mrias_cl(_span, ref_processor(), &_markBitMap, NULL /* not precleaning */,
5911              &_markStack, this,
5912              false /* should_yield */, false /* not precleaning */);
5913   MarkFromDirtyCardsClosure
5914     markFromDirtyCardsClosure(this, _span,
5915                               NULL,  // space is set further below
5916                               &_markBitMap, &_markStack, &mrias_cl);
5917   {
5918     GCTraceTime t("grey object rescan", PrintGCDetails, false, _gc_timer_cm);
5919     // Iterate over the dirty cards, setting the corresponding bits in the
5920     // mod union table.
5921     {
5922       ModUnionClosure modUnionClosure(&_modUnionTable);
5923       _ct->ct_bs()->dirty_card_iterate(
5924                       _cmsGen->used_region(),
5925                       &modUnionClosure);
5926     }
5927     // Having transferred these marks into the modUnionTable, we just need
5928     // to rescan the marked objects on the dirty cards in the modUnionTable.
5929     // The initial marking may have been done during an asynchronous
5930     // collection so there may be dirty bits in the mod-union table.
5931     const int alignment =
5932       CardTableModRefBS::card_size * BitsPerWord;
5933     {
5934       // ... First handle dirty cards in CMS gen
5935       markFromDirtyCardsClosure.set_space(_cmsGen->cmsSpace());
5936       MemRegion ur = _cmsGen->used_region();
5937       HeapWord* lb = ur.start();
5938       HeapWord* ub = (HeapWord*)round_to((intptr_t)ur.end(), alignment);
5939       MemRegion cms_span(lb, ub);
5940       _modUnionTable.dirty_range_iterate_clear(cms_span,
5941                                                &markFromDirtyCardsClosure);
5942       verify_work_stacks_empty();
5943       if (PrintCMSStatistics != 0) {
5944         gclog_or_tty->print(" (re-scanned "SIZE_FORMAT" dirty cards in cms gen) ",
5945           markFromDirtyCardsClosure.num_dirty_cards());
5946       }
5947     }
5948   }
5949   if (VerifyDuringGC &&
5950       GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
5951     HandleMark hm;  // Discard invalid handles created during verification
5952     Universe::verify();
5953   }
5954   {
5955     GCTraceTime t("root rescan", PrintGCDetails, false, _gc_timer_cm);
5956 
5957     verify_work_stacks_empty();
5958 
5959     gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
5960     GenCollectedHeap::StrongRootsScope srs(gch);
5961     gch->gen_process_strong_roots(_cmsGen->level(),
5962                                   true,  // younger gens as roots
5963                                   false, // use the local StrongRootsScope
5964                                   false, // not scavenging
5965                                   SharedHeap::ScanningOption(roots_scanning_options()),
5966                                   &mrias_cl,
5967                                   true,   // walk code active on stacks
5968                                   NULL,
5969                                   NULL);  // The dirty klasses will be handled below
5970 
5971     assert(should_unload_classes()
5972            || (roots_scanning_options() & SharedHeap::SO_CodeCache),
5973            "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
5974   }
5975 
5976   {
5977     GCTraceTime t("visit unhandled CLDs", PrintGCDetails, false, _gc_timer_cm);
5978 
5979     verify_work_stacks_empty();
5980 
5981     // Scan all class loader data objects that might have been introduced
5982     // during concurrent marking.
5983     ResourceMark rm;
5984     GrowableArray<ClassLoaderData*>* array = ClassLoaderDataGraph::new_clds();
5985     for (int i = 0; i < array->length(); i++) {
5986       mrias_cl.do_class_loader_data(array->at(i));
5987     }
5988 
5989     // We don't need to keep track of new CLDs anymore.
5990     ClassLoaderDataGraph::remember_new_clds(false);
5991 
5992     verify_work_stacks_empty();
5993   }
5994 
5995   {
5996     GCTraceTime t("dirty klass scan", PrintGCDetails, false, _gc_timer_cm);
5997 
5998     verify_work_stacks_empty();
5999 
6000     RemarkKlassClosure remark_klass_closure(&mrias_cl);
6001     ClassLoaderDataGraph::classes_do(&remark_klass_closure);
6002 
6003     verify_work_stacks_empty();
6004   }
6005 
6006   // We might have added oops to ClassLoaderData::_handles during the
6007   // concurrent marking phase. These oops point to newly allocated objects
6008   // that are guaranteed to be kept alive. Either by the direct allocation
6009   // code, or when the young collector processes the strong roots. Hence,
6010   // we don't have to revisit the _handles block during the remark phase.
6011 
6012   verify_work_stacks_empty();
6013   // Restore evacuated mark words, if any, used for overflow list links
6014   if (!CMSOverflowEarlyRestoration) {
6015     restore_preserved_marks_if_any();
6016   }
6017   verify_overflow_empty();
6018 }
6019 
6020 ////////////////////////////////////////////////////////
6021 // Parallel Reference Processing Task Proxy Class
6022 ////////////////////////////////////////////////////////
6023 class CMSRefProcTaskProxy: public AbstractGangTaskWOopQueues {
6024   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
6025   CMSCollector*          _collector;
6026   CMSBitMap*             _mark_bit_map;
6027   const MemRegion        _span;
6028   ProcessTask&           _task;
6029 
6030 public:
6031   CMSRefProcTaskProxy(ProcessTask&     task,
6032                       CMSCollector*    collector,
6033                       const MemRegion& span,
6034                       CMSBitMap*       mark_bit_map,
6035                       AbstractWorkGang* workers,
6036                       OopTaskQueueSet* task_queues):
6037     // XXX Should superclass AGTWOQ also know about AWG since it knows
6038     // about the task_queues used by the AWG? Then it could initialize
6039     // the terminator() object. See 6984287. The set_for_termination()
6040     // below is a temporary band-aid for the regression in 6984287.
6041     AbstractGangTaskWOopQueues("Process referents by policy in parallel",
6042       task_queues),
6043     _task(task),
6044     _collector(collector), _span(span), _mark_bit_map(mark_bit_map)
6045   {
6046     assert(_collector->_span.equals(_span) && !_span.is_empty(),
6047            "Inconsistency in _span");
6048     set_for_termination(workers->active_workers());
6049   }
6050 
6051   OopTaskQueueSet* task_queues() { return queues(); }
6052 
6053   OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
6054 
6055   void do_work_steal(int i,
6056                      CMSParDrainMarkingStackClosure* drain,
6057                      CMSParKeepAliveClosure* keep_alive,
6058                      int* seed);
6059 
6060   virtual void work(uint worker_id);
6061 };
6062 
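     // Work method for the reference processing proxy task: each worker
     // processes its share of the discovered references using a parallel
     // keep-alive closure and a marking stack drain closure, and then, if
     // the task marks oops alive, helps out by stealing work from other
     // workers' queues.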
6063 void CMSRefProcTaskProxy::work(uint worker_id) {
6064   assert(_collector->_span.equals(_span), "Inconsistency in _span");
6065   CMSParKeepAliveClosure par_keep_alive(_collector, _span,
6066                                         _mark_bit_map,
6067                                         work_queue(worker_id));
6068   CMSParDrainMarkingStackClosure par_drain_stack(_collector, _span,
6069                                                  _mark_bit_map,
6070                                                  work_queue(worker_id));
6071   CMSIsAliveClosure is_alive_closure(_span, _mark_bit_map);
6072   _task.work(worker_id, is_alive_closure, par_keep_alive, par_drain_stack);
6073   if (_task.marks_oops_alive()) {
6074     do_work_steal(worker_id, &par_drain_stack, &par_keep_alive,
6075                   _collector->hash_seed(worker_id));
6076   }
6077   assert(work_queue(worker_id)->size() == 0, "work_queue should be empty");
6078   assert(_collector->_overflow_list == NULL, "non-empty _overflow_list");
6079 }
6080 
6081 class CMSRefEnqueueTaskProxy: public AbstractGangTask {
6082   typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
6083   EnqueueTask& _task;
6084 
6085 public:
6086   CMSRefEnqueueTaskProxy(EnqueueTask& task)
6087     : AbstractGangTask("Enqueue reference objects in parallel"),
6088       _task(task)
6089   { }
6090 
6091   virtual void work(uint worker_id)
6092   {
6093     _task.work(worker_id);
6094   }
6095 };
6096 
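     // The _low_water_mark used by this closure is the smaller of a quarter
     // of the work queue's capacity and CMSWorkQueueDrainThreshold scaled by
     // the number of parallel GC threads.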
6097 CMSParKeepAliveClosure::CMSParKeepAliveClosure(CMSCollector* collector,
6098   MemRegion span, CMSBitMap* bit_map, OopTaskQueue* work_queue):
6099    _span(span),
6100    _bit_map(bit_map),
6101    _work_queue(work_queue),
6102    _mark_and_push(collector, span, bit_map, work_queue),
6103    _low_water_mark(MIN2((uint)(work_queue->max_elems()/4),
6104                         (uint)(CMSWorkQueueDrainThreshold * ParallelGCThreads)))
6105 { }
6106 
6107 // . see if we can share work_queues with ParNew? XXX
6108 void CMSRefProcTaskProxy::do_work_steal(int i,
6109   CMSParDrainMarkingStackClosure* drain,
6110   CMSParKeepAliveClosure* keep_alive,
6111   int* seed) {
6112   OopTaskQueue* work_q = work_queue(i);
6113   NOT_PRODUCT(int num_steals = 0;)
6114   oop obj_to_scan;
6115 
6116   while (true) {
6117     // Completely finish any left over work from (an) earlier round(s)
6118     drain->trim_queue(0);
6119     size_t num_from_overflow_list = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
6120                                          (size_t)ParGCDesiredObjsFromOverflowList);
6121     // Now check if there's any work in the overflow list
6122     // Passing ParallelGCThreads as the third parameter, no_of_gc_threads,
6123     // only affects the number of attempts made to get work from the
6124     // overflow list and does not affect the number of workers.  Just
6125     // pass ParallelGCThreads so this behavior is unchanged.
6126     if (_collector->par_take_from_overflow_list(num_from_overflow_list,
6127                                                 work_q,
6128                                                 ParallelGCThreads)) {
6129       // Found something in global overflow list;
6130       // not yet ready to go stealing work from others.
6131       // We'd like to assert(work_q->size() != 0, ...)
6132       // because we just took work from the overflow list,
6133       // but of course we can't, since all of that might have
6134       // been already stolen from us.
6135       continue;
6136     }
6137     // Verify that we have no work before we resort to stealing
6138     assert(work_q->size() == 0, "Have work, shouldn't steal");
6139     // Try to steal from other queues that have work
6140     if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
6141       NOT_PRODUCT(num_steals++;)
6142       assert(obj_to_scan->is_oop(), "Oops, not an oop!");
6143       assert(_mark_bit_map->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?");
6144       // Do scanning work
6145       obj_to_scan->oop_iterate(keep_alive);
6146       // Loop around, finish this work, and try to steal some more
6147     } else if (terminator()->offer_termination()) {
6148       break;  // nirvana from the infinite cycle
6149     }
6150   }
6151   NOT_PRODUCT(
6152     if (PrintCMSStatistics != 0) {
6153       gclog_or_tty->print("\n\t(%d: stole %d oops)", i, num_steals);
6154     }
6155   )
6156 }
6157 
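     // Execute the given reference processing task in parallel by wrapping
     // it in a CMSRefProcTaskProxy and running it on the heap's worker gang.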
6158 void CMSRefProcTaskExecutor::execute(ProcessTask& task)
6159 {
6160   GenCollectedHeap* gch = GenCollectedHeap::heap();
6161   FlexibleWorkGang* workers = gch->workers();
6162   assert(workers != NULL, "Need parallel worker threads.");
6163   CMSRefProcTaskProxy rp_task(task, &_collector,
6164                               _collector.ref_processor()->span(),
6165                               _collector.markBitMap(),
6166                               workers, _collector.task_queues());
6167   workers->run_task(&rp_task);
6168 }
6169 
6170 void CMSRefProcTaskExecutor::execute(EnqueueTask& task)
6171 {
6172 
6173   GenCollectedHeap* gch = GenCollectedHeap::heap();
6174   FlexibleWorkGang* workers = gch->workers();
6175   assert(workers != NULL, "Need parallel worker threads.");
6176   CMSRefEnqueueTaskProxy enq_task(task);
6177   workers->run_task(&enq_task);
6178 }
6179 
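     // Process discovered (weak) references and, if requested, unload
     // classes. Reference processing is done either multi-threaded, via a
     // CMSRefProcTaskExecutor, or single-threaded, depending on the
     // reference processor's configuration. Class unloading purges the
     // SystemDictionary, unloads nmethods and cleans weak klass links; the
     // symbol table is then scrubbed, and the string table is scrubbed if it
     // was not scanned as a strong root. Finally, any preserved marks are
     // restored and the discovered references are enqueued.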
6180 void CMSCollector::refProcessingWork(bool asynch, bool clear_all_soft_refs) {
6181 
6182   ResourceMark rm;
6183   HandleMark   hm;
6184 
6185   ReferenceProcessor* rp = ref_processor();
6186   assert(rp->span().equals(_span), "Spans should be equal");
6187   assert(!rp->enqueuing_is_done(), "Enqueuing should not be complete");
6188   // Process weak references.
6189   rp->setup_policy(clear_all_soft_refs);
6190   verify_work_stacks_empty();
6191 
6192   CMSKeepAliveClosure cmsKeepAliveClosure(this, _span, &_markBitMap,
6193                                           &_markStack, false /* !preclean */);
6194   CMSDrainMarkingStackClosure cmsDrainMarkingStackClosure(this,
6195                                 _span, &_markBitMap, &_markStack,
6196                                 &cmsKeepAliveClosure, false /* !preclean */);
6197   {
6198     GCTraceTime t("weak refs processing", PrintGCDetails, false, _gc_timer_cm);
6199 
6200     ReferenceProcessorStats stats;
6201     if (rp->processing_is_mt()) {
6202       // Set the degree of MT here.  If the discovery is done MT, there
6203       // may have been a different number of threads doing the discovery
6204       // and a different number of discovered lists may have Ref objects.
6205       // That is OK as long as the Reference lists are balanced (see
6206       // balance_all_queues() and balance_queues()).
6207       GenCollectedHeap* gch = GenCollectedHeap::heap();
6208       int active_workers = ParallelGCThreads;
6209       FlexibleWorkGang* workers = gch->workers();
6210       if (workers != NULL) {
6211         active_workers = workers->active_workers();
6212         // The expectation is that active_workers will have already
6213         // been set to a reasonable value.  If it has not been set,
6214         // investigate.
6215         assert(active_workers > 0, "Should have been set during scavenge");
6216       }
6217       rp->set_active_mt_degree(active_workers);
6218       CMSRefProcTaskExecutor task_executor(*this);
6219       stats = rp->process_discovered_references(&_is_alive_closure,
6220                                         &cmsKeepAliveClosure,
6221                                         &cmsDrainMarkingStackClosure,
6222                                         &task_executor,
6223                                         _gc_timer_cm);
6224     } else {
6225       stats = rp->process_discovered_references(&_is_alive_closure,
6226                                         &cmsKeepAliveClosure,
6227                                         &cmsDrainMarkingStackClosure,
6228                                         NULL,
6229                                         _gc_timer_cm);
6230     }
6231     _gc_tracer_cm->report_gc_reference_stats(stats);
6232 
6233   }
6234 
6235   // This is the point where the entire marking should have completed.
6236   verify_work_stacks_empty();
6237 
6238   if (should_unload_classes()) {
6239     {
6240       GCTraceTime t("class unloading", PrintGCDetails, false, _gc_timer_cm);
6241 
6242       // Unload classes and purge the SystemDictionary.
6243       bool purged_class = SystemDictionary::do_unloading(&_is_alive_closure);
6244 
6245       // Unload nmethods.
6246       CodeCache::do_unloading(&_is_alive_closure, purged_class);
6247 
6248       // Prune dead klasses from subklass/sibling/implementor lists.
6249       Klass::clean_weak_klass_links(&_is_alive_closure);
6250     }
6251 
6252     {
6253       GCTraceTime t("scrub symbol table", PrintGCDetails, false, _gc_timer_cm);
6254       // Clean up unreferenced symbols in symbol table.
6255       SymbolTable::unlink();
6256     }
6257   }
6258 
6259   // CMS doesn't use the StringTable as hard roots when class unloading is turned off.
6260   // Need to check if we really scanned the StringTable.
6261   if ((roots_scanning_options() & SharedHeap::SO_Strings) == 0) {
6262     GCTraceTime t("scrub string table", PrintGCDetails, false, _gc_timer_cm);
6263     // Delete entries for dead interned strings.
6264     StringTable::unlink(&_is_alive_closure);
6265   }
6266 
6267   // Restore any preserved marks as a result of mark stack or
6268   // work queue overflow
6269   restore_preserved_marks_if_any();  // done single-threaded for now
6270 
6271   rp->set_enqueuing_is_done(true);
6272   if (rp->processing_is_mt()) {
6273     rp->balance_all_queues();
6274     CMSRefProcTaskExecutor task_executor(*this);
6275     rp->enqueue_discovered_references(&task_executor);
6276   } else {
6277     rp->enqueue_discovered_references(NULL);
6278   }
6279   rp->verify_no_references_recorded();
6280   assert(!rp->discovery_enabled(), "should have been disabled");
6281 }
6282 
6283 #ifndef PRODUCT
6284 void CMSCollector::check_correct_thread_executing() {
6285   Thread* t = Thread::current();
6286   // Only the VM thread or the CMS thread should be here.
6287   assert(t->is_ConcurrentGC_thread() || t->is_VM_thread(),
6288          "Unexpected thread type");
6289   // If this is the vm thread, the foreground process
6290   // should not be waiting.  Note that _foregroundGCIsActive is
6291   // true while the foreground collector is waiting.
6292   if (_foregroundGCShouldWait) {
6293     // We cannot be the VM thread
6294     assert(t->is_ConcurrentGC_thread(),
6295            "Should be CMS thread");
6296   } else {
6297     // We can be the CMS thread only if we are in a stop-world
6298     // phase of CMS collection.
6299     if (t->is_ConcurrentGC_thread()) {
6300       assert(_collectorState == InitialMarking ||
6301              _collectorState == FinalMarking,
6302              "Should be a stop-world phase");
6303       // The CMS thread should be holding the CMS_token.
6304       assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6305              "Potential interference with concurrently "
6306              "executing VM thread");
6307     }
6308   }
6309 }
6310 #endif
6311 
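     // Sweep phase: sample the inter-sweep time, sweep the CMS generation
     // (under CMS-token synchronization and the free list and bit map locks
     // in the asynchronous case), update heap occupancy information used by
     // the soft reference clearing policy, and transition the collector to
     // the Resizing state.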
6312 void CMSCollector::sweep(bool asynch) {
6313   assert(_collectorState == Sweeping, "just checking");
6314   check_correct_thread_executing();
6315   verify_work_stacks_empty();
6316   verify_overflow_empty();
6317   increment_sweep_count();
6318   TraceCMSMemoryManagerStats tms(_collectorState,GenCollectedHeap::heap()->gc_cause());
6319 
6320   _inter_sweep_timer.stop();
6321   _inter_sweep_estimate.sample(_inter_sweep_timer.seconds());
6322   size_policy()->avg_cms_free_at_sweep()->sample(_cmsGen->free());
6323 
6324   assert(!_intra_sweep_timer.is_active(), "Should not be active");
6325   _intra_sweep_timer.reset();
6326   _intra_sweep_timer.start();
6327   if (asynch) {
6328     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
6329     CMSPhaseAccounting pa(this, "sweep", !PrintGCDetails);
6330     // First sweep the old gen
6331     {
6332       CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(),
6333                                bitMapLock());
6334       sweepWork(_cmsGen, asynch);
6335     }
6336 
6337     // Update Universe::_heap_*_at_gc figures.
6338     // We need all the free list locks to make the abstract state
6339     // transition from Sweeping to Resetting. See detailed note
6340     // further below.
6341     {
6342       CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock());
6343       // Update heap occupancy information which is used as
6344       // input to soft ref clearing policy at the next gc.
6345       Universe::update_heap_info_at_gc();
6346       _collectorState = Resizing;
6347     }
6348   } else {
6349     // already have needed locks
6350     sweepWork(_cmsGen,  asynch);
6351     // Update heap occupancy information which is used as
6352     // input to soft ref clearing policy at the next gc.
6353     Universe::update_heap_info_at_gc();
6354     _collectorState = Resizing;
6355   }
6356   verify_work_stacks_empty();
6357   verify_overflow_empty();
6358 
6359   if (should_unload_classes()) {
6360     ClassLoaderDataGraph::purge();
6361   }
6362 
6363   _intra_sweep_timer.stop();
6364   _intra_sweep_estimate.sample(_intra_sweep_timer.seconds());
6365 
6366   _inter_sweep_timer.reset();
6367   _inter_sweep_timer.start();
6368 
6369   // We need to use a monotonically non-decreasing time in ms
6370   // or we will see time-warp warnings; os::javaTimeMillis()
6371   // does not guarantee monotonicity.
6372   jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
6373   update_time_of_last_gc(now);
6374 
6375   // NOTE on abstract state transitions:
6376   // Mutators allocate-live and/or mark the mod-union table dirty
6377   // based on the state of the collection.  The former is done in
6378   // the interval [Marking, Sweeping] and the latter in the interval
6379   // [Marking, Sweeping).  Thus the transitions into the Marking state
6380   // and out of the Sweeping state must be synchronously visible
6381   // globally to the mutators.
6382   // The transition into the Marking state happens with the world
6383   // stopped so the mutators will globally see it.  Sweeping is
6384   // done asynchronously by the background collector so the transition
6385   // from the Sweeping state to the Resizing state must be done
6386   // under the freelistLock (as is the check for whether to
6387   // allocate-live and whether to dirty the mod-union table).
6388   assert(_collectorState == Resizing, "Change of collector state to"
6389     " Resizing must be done under the freelistLocks (plural)");
6390 
6391   // Now that sweeping has been completed, we clear
6392   // the incremental_collection_failed flag,
6393   // thus inviting a younger gen collection to promote into
6394   // this generation. If such a promotion may still fail,
6395   // the flag will be set again when a young collection is
6396   // attempted.
6397   GenCollectedHeap* gch = GenCollectedHeap::heap();
6398   gch->clear_incremental_collection_failed();  // Worth retrying as fresh space may have been freed up
6399   gch->update_full_collections_completed(_collection_count_start);
6400 }
6401 
6402 // FIX ME!!! Looks like this belongs in CFLSpace, with
6403 // CMSGen merely delegating to it.
6404 void ConcurrentMarkSweepGeneration::setNearLargestChunk() {
6405   double nearLargestPercent = FLSLargestBlockCoalesceProximity;
6406   HeapWord*  minAddr        = _cmsSpace->bottom();
6407   HeapWord*  largestAddr    =
6408     (HeapWord*) _cmsSpace->dictionary()->find_largest_dict();
6409   if (largestAddr == NULL) {
6410     // The dictionary appears to be empty.  In this case
6411     // try to coalesce at the end of the heap.
6412     largestAddr = _cmsSpace->end();
6413   }
6414   size_t largestOffset     = pointer_delta(largestAddr, minAddr);
6415   size_t nearLargestOffset =
6416     (size_t)((double)largestOffset * nearLargestPercent) - MinChunkSize;
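       // A rough illustration (assuming, say, a proximity factor of 0.99 and
       // ignoring MinChunkSize): if the largest free block starts 1,000,000
       // words above bottom(), the threshold computed above lands about
       // 10,000 words below it, so only blocks in that small tail of the
       // space are treated as "near" the largest chunk.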
6417   if (PrintFLSStatistics != 0) {
6418     gclog_or_tty->print_cr(
6419       "CMS: Large Block: " PTR_FORMAT ";"
6420       " Proximity: " PTR_FORMAT " -> " PTR_FORMAT,
6421       largestAddr,
6422       _cmsSpace->nearLargestChunk(), minAddr + nearLargestOffset);
6423   }
6424   _cmsSpace->set_nearLargestChunk(minAddr + nearLargestOffset);
6425 }
6426 
6427 bool ConcurrentMarkSweepGeneration::isNearLargestChunk(HeapWord* addr) {
6428   return addr >= _cmsSpace->nearLargestChunk();
6429 }
6430 
6431 FreeChunk* ConcurrentMarkSweepGeneration::find_chunk_at_end() {
6432   return _cmsSpace->find_chunk_at_end();
6433 }
6434 
6435 void ConcurrentMarkSweepGeneration::update_gc_stats(int current_level,
6436                                                     bool full) {
6437   // The next lower level has been collected.  Gather any statistics
6438   // that are of interest at this point.
6439   if (!full && (current_level + 1) == level()) {
6440     // Gather statistics on the young generation collection.
6441     collector()->stats().record_gc0_end(used());
6442   }
6443 }
6444 
6445 CMSAdaptiveSizePolicy* ConcurrentMarkSweepGeneration::size_policy() {
6446   GenCollectedHeap* gch = GenCollectedHeap::heap();
6447   assert(gch->kind() == CollectedHeap::GenCollectedHeap,
6448     "Wrong type of heap");
6449   CMSAdaptiveSizePolicy* sp = (CMSAdaptiveSizePolicy*)
6450     gch->gen_policy()->size_policy();
6451   assert(sp->is_gc_cms_adaptive_size_policy(),
6452     "Wrong type of size policy");
6453   return sp;
6454 }
6455 
6456 void ConcurrentMarkSweepGeneration::rotate_debug_collection_type() {
6457   if (PrintGCDetails && Verbose) {
6458     gclog_or_tty->print("Rotate from %d ", _debug_collection_type);
6459   }
6460   _debug_collection_type = (CollectionTypes) (_debug_collection_type + 1);
6461   _debug_collection_type =
6462     (CollectionTypes) (_debug_collection_type % Unknown_collection_type);
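       // e.g. the value cycles 0, 1, ..., Unknown_collection_type - 1, 0, ...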
6463   if (PrintGCDetails && Verbose) {
6464     gclog_or_tty->print_cr("to %d ", _debug_collection_type);
6465   }
6466 }
6467 
6468 void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* gen,
6469   bool asynch) {
6470   // We iterate over the space(s) underlying this generation,
6471   // checking the mark bit map to see if the bits corresponding
6472   // to specific blocks are marked or not. Blocks that are
6473   // marked are live and are not swept up. All remaining blocks
6474   // are swept up, with coalescing on-the-fly as we sweep up
6475   // contiguous free and/or garbage blocks:
6476   // We need to ensure that the sweeper synchronizes with allocators
6477   // and stop-the-world collectors. In particular, the following
6478   // locks are used:
6479   // . CMS token: if this is held, a stop the world collection cannot occur
6480   // . freelistLock: if this is held no allocation can occur from this
6481   //                 generation by another thread
6482   // . bitMapLock: if this is held, no other thread can access or update
6483   //               the marking bit map
6484 
6485   // Note that we need to hold the freelistLock if we use
6486   // block iterate below; else the iterator might go awry if
6487   // a mutator (or promotion) causes block contents to change
6488   // (for instance if the allocator divvies up a block).
6489   // If we hold the free list lock, for all practical purposes
6490   // young generation GC's can't occur (they'll usually need to
6491   // promote), so we might as well prevent all young generation
6492   // GC's while we do a sweeping step. For the same reason, we might
6493   // as well take the bit map lock for the entire duration.
6494 
6495   // check that we hold the requisite locks
6496   assert(have_cms_token(), "Should hold cms token");
6497   assert(   (asynch && ConcurrentMarkSweepThread::cms_thread_has_cms_token())
6498          || (!asynch && ConcurrentMarkSweepThread::vm_thread_has_cms_token()),
6499         "Should possess CMS token to sweep");
6500   assert_lock_strong(gen->freelistLock());
6501   assert_lock_strong(bitMapLock());
6502 
6503   assert(!_inter_sweep_timer.is_active(), "Was switched off in an outer context");
6504   assert(_intra_sweep_timer.is_active(),  "Was switched on  in an outer context");
6505   gen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()),
6506                                       _inter_sweep_estimate.padded_average(),
6507                                       _intra_sweep_estimate.padded_average());
6508   gen->setNearLargestChunk();
6509 
6510   {
6511     SweepClosure sweepClosure(this, gen, &_markBitMap,
6512                             CMSYield && asynch);
6513     gen->cmsSpace()->blk_iterate_careful(&sweepClosure);
6514     // We need to free-up/coalesce garbage/blocks from a
6515     // co-terminal free run. This is done in the SweepClosure
6516     // destructor; so, do not remove this scope, else the
6517     // end-of-sweep-census below will be off by a little bit.
6518   }
6519   gen->cmsSpace()->sweep_completed();
6520   gen->cmsSpace()->endSweepFLCensus(sweep_count());
6521   if (should_unload_classes()) {                // unloaded classes this cycle,
6522     _concurrent_cycles_since_last_unload = 0;   // ... reset count
6523   } else {                                      // did not unload classes,
6524     _concurrent_cycles_since_last_unload++;     // ... increment count
6525   }
6526 }
6527 
6528 // Reset CMS data structures (for now just the marking bit map)
6529 // preparatory for the next cycle.
6530 void CMSCollector::reset(bool asynch) {
6531   GenCollectedHeap* gch = GenCollectedHeap::heap();
6532   CMSAdaptiveSizePolicy* sp = size_policy();
6533   AdaptiveSizePolicyOutput(sp, gch->total_collections());
6534   if (asynch) {
6535     CMSTokenSyncWithLocks ts(true, bitMapLock());
6536 
6537     // If the state is not "Resetting", the foreground thread
6538     // has already done the collection and the resetting.
6539     if (_collectorState != Resetting) {
6540       assert(_collectorState == Idling, "The state should only change"
6541         " because the foreground collector has finished the collection");
6542       return;
6543     }
6544 
6545     // Clear the mark bitmap (no grey objects to start with)
6546     // for the next cycle.
6547     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
6548     CMSPhaseAccounting cmspa(this, "reset", !PrintGCDetails);
6549 
6550     HeapWord* curAddr = _markBitMap.startWord();
6551     while (curAddr < _markBitMap.endWord()) {
6552       size_t remaining  = pointer_delta(_markBitMap.endWord(), curAddr);
6553       MemRegion chunk(curAddr, MIN2(CMSBitMapYieldQuantum, remaining));
6554       _markBitMap.clear_large_range(chunk);
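           // Illustration (with a yield quantum of, say, 10M HeapWords): each
           // pass through this loop clears the bits for at most ~80 MB of
           // heap on a 64-bit VM before offering to yield below.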
6555       if (ConcurrentMarkSweepThread::should_yield() &&
6556           !foregroundGCIsActive() &&
6557           CMSYield) {
6558         assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6559                "CMS thread should hold CMS token");
6560         assert_lock_strong(bitMapLock());
6561         bitMapLock()->unlock();
6562         ConcurrentMarkSweepThread::desynchronize(true);
6563         ConcurrentMarkSweepThread::acknowledge_yield_request();
6564         stopTimer();
6565         if (PrintCMSStatistics != 0) {
6566           incrementYields();
6567         }
6568         icms_wait();
6569 
6570         // See the comment in coordinator_yield()
6571         for (unsigned i = 0; i < CMSYieldSleepCount &&
6572                          ConcurrentMarkSweepThread::should_yield() &&
6573                          !CMSCollector::foregroundGCIsActive(); ++i) {
6574           os::sleep(Thread::current(), 1, false);
6575           ConcurrentMarkSweepThread::acknowledge_yield_request();
6576         }
6577 
6578         ConcurrentMarkSweepThread::synchronize(true);
6579         bitMapLock()->lock_without_safepoint_check();
6580         startTimer();
6581       }
6582       curAddr = chunk.end();
6583     }
6584     // A successful mostly concurrent collection has been done.
6585     // Because only the full (i.e., concurrent mode failure) collections
6586     // are being measured for gc overhead limits, clean the "near" flag
6587     // and count.
6588     sp->reset_gc_overhead_limit_count();
6589     _collectorState = Idling;
6590   } else {
6591     // already have the lock
6592     assert(_collectorState == Resetting, "just checking");
6593     assert_lock_strong(bitMapLock());
6594     _markBitMap.clear_all();
6595     _collectorState = Idling;
6596   }
6597 
6598   // Stop incremental mode after a cycle completes, so that any future cycles
6599   // are triggered by allocation.
6600   stop_icms();
6601 
6602   NOT_PRODUCT(
6603     if (RotateCMSCollectionTypes) {
6604       _cmsGen->rotate_debug_collection_type();
6605     }
6606   )
6607 
6608   register_gc_end();
6609 }
6610 
6611 void CMSCollector::do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause) {
6612   gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
6613   TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
6614   GCTraceTime t(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL);
6615   TraceCollectorStats tcs(counters());
6616 
6617   switch (op) {
6618     case CMS_op_checkpointRootsInitial: {
6619       SvcGCMarker sgcm(SvcGCMarker::OTHER);
6620       checkpointRootsInitial(true);       // asynch
6621       if (PrintGC) {
6622         _cmsGen->printOccupancy("initial-mark");
6623       }
6624       break;
6625     }
6626     case CMS_op_checkpointRootsFinal: {
6627       SvcGCMarker sgcm(SvcGCMarker::OTHER);
6628       checkpointRootsFinal(true,    // asynch
6629                            false,   // !clear_all_soft_refs
6630                            false);  // !init_mark_was_synchronous
6631       if (PrintGC) {
6632         _cmsGen->printOccupancy("remark");
6633       }
6634       break;
6635     }
6636     default:
6637       fatal("No such CMS_op");
6638   }
6639 }
6640 
6641 #ifndef PRODUCT
6642 size_t const CMSCollector::skip_header_HeapWords() {
6643   return FreeChunk::header_size();
6644 }
6645 
6646 // Try and collect here conditions that should hold when
6647 // CMS thread is exiting. The idea is that the foreground GC
6648 // thread should not be blocked if it wants to terminate
6649 // the CMS thread and yet continue to run the VM for a while
6650 // after that.
6651 void CMSCollector::verify_ok_to_terminate() const {
6652   assert(Thread::current()->is_ConcurrentGC_thread(),
6653          "should be called by CMS thread");
6654   assert(!_foregroundGCShouldWait, "should be false");
6655   // We could check here that all the various low-level locks
6656   // are not held by the CMS thread, but that is overkill; see
6657   // also CMSThread::verify_ok_to_terminate() where the CGC_lock
6658   // is checked.
6659 }
6660 #endif
6661 
6662 size_t CMSCollector::block_size_using_printezis_bits(HeapWord* addr) const {
6663   assert(_markBitMap.isMarked(addr) && _markBitMap.isMarked(addr + 1),
6664          "missing Printezis mark?");
6665   HeapWord* nextOneAddr = _markBitMap.getNextMarkedWordAddress(addr + 2);
6666   size_t size = pointer_delta(nextOneAddr + 1, addr);
6667   assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
6668          "alignment problem");
6669   assert(size >= 3, "Necessary for Printezis marks to work");
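       // Worked example (illustrative; this is the convention assumed by the
       // computation above): an uninitialized block at addr of, say, 5
       // HeapWords has bits set at addr, addr+1 and at its last word addr+4,
       // so getNextMarkedWordAddress(addr + 2) returns addr+4 and
       // size = (addr+4) + 1 - addr = 5 words.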
6670   return size;
6671 }
6672 
6673 // A variant of the above (block_size_using_printezis_bits()) except
6674 // that we return 0 if the P-bits are not yet set.
6675 size_t CMSCollector::block_size_if_printezis_bits(HeapWord* addr) const {
6676   if (_markBitMap.isMarked(addr + 1)) {
6677     assert(_markBitMap.isMarked(addr), "P-bit can be set only for marked objects");
6678     HeapWord* nextOneAddr = _markBitMap.getNextMarkedWordAddress(addr + 2);
6679     size_t size = pointer_delta(nextOneAddr + 1, addr);
6680     assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
6681            "alignment problem");
6682     assert(size >= 3, "Necessary for Printezis marks to work");
6683     return size;
6684   }
6685   return 0;
6686 }
6687 
6688 HeapWord* CMSCollector::next_card_start_after_block(HeapWord* addr) const {
6689   size_t sz = 0;
6690   oop p = (oop)addr;
6691   if (p->klass_or_null() != NULL) {
6692     sz = CompactibleFreeListSpace::adjustObjectSize(p->size());
6693   } else {
6694     sz = block_size_using_printezis_bits(addr);
6695   }
6696   assert(sz > 0, "size must be nonzero");
6697   HeapWord* next_block = addr + sz;
6698   HeapWord* next_card  = (HeapWord*)round_to((uintptr_t)next_block,
6699                                              CardTableModRefBS::card_size);
6700   assert(round_down((uintptr_t)addr,      CardTableModRefBS::card_size) <
6701          round_down((uintptr_t)next_card, CardTableModRefBS::card_size),
6702          "must be different cards");
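       // Illustrative arithmetic (assuming the usual 512-byte cards, 64-bit
       // VM): for a block at 0x1000 with an adjusted size of 40 words (320
       // bytes), next_block is 0x1140 and next_card rounds up to 0x1200,
       // which is on a different card than 0x1000, as asserted above.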
6703   return next_card;
6704 }
6705 
6706 
6707 // CMS Bit Map Wrapper /////////////////////////////////////////
6708 
6709 // Construct a CMS bit map infrastructure, but don't create the
6710 // bit vector itself. That is done by a separate call CMSBitMap::allocate()
6711 // further below.
6712 CMSBitMap::CMSBitMap(int shifter, int mutex_rank, const char* mutex_name):
6713   _bm(),
6714   _shifter(shifter),
6715   _lock(mutex_rank >= 0 ? new Mutex(mutex_rank, mutex_name, true) : NULL)
6716 {
6717   _bmStartWord = 0;
6718   _bmWordSize  = 0;
6719 }
6720 
6721 bool CMSBitMap::allocate(MemRegion mr) {
6722   _bmStartWord = mr.start();
6723   _bmWordSize  = mr.word_size();
6724   ReservedSpace brs(ReservedSpace::allocation_align_size_up(
6725                      (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1));
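       // For a sense of scale (an illustrative example, not a guarantee):
       // with _shifter == 0 (one bit per HeapWord) on a 64-bit VM, covering a
       // 1 GB generation of 2^27 HeapWords asks for 2^24 + 1 bytes (~16 MB)
       // above, rounded up to the allocation alignment.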
6726   if (!brs.is_reserved()) {
6727     warning("CMS bit map allocation failure");
6728     return false;
6729   }
6730   // For now we'll just commit all of the bit map up front.
6731   // Later on we'll try to be more parsimonious with swap.
6732   if (!_virtual_space.initialize(brs, brs.size())) {
6733     warning("CMS bit map backing store failure");
6734     return false;
6735   }
6736   assert(_virtual_space.committed_size() == brs.size(),
6737          "didn't reserve backing store for all of CMS bit map?");
6738   _bm.set_map((BitMap::bm_word_t*)_virtual_space.low());
6739   assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >=
6740          _bmWordSize, "inconsistency in bit map sizing");
6741   _bm.set_size(_bmWordSize >> _shifter);
6742 
6743   // bm.clear(); // can we rely on getting zero'd memory? verify below
6744   assert(isAllClear(),
6745          "Expected zero'd memory from ReservedSpace constructor");
6746   assert(_bm.size() == heapWordDiffToOffsetDiff(sizeInWords()),
6747          "consistency check");
6748   return true;
6749 }
6750 
6751 void CMSBitMap::dirty_range_iterate_clear(MemRegion mr, MemRegionClosure* cl) {
6752   HeapWord *next_addr, *end_addr, *last_addr;
6753   assert_locked();
6754   assert(covers(mr), "out-of-range error");
6755   // XXX assert that start and end are appropriately aligned
6756   for (next_addr = mr.start(), end_addr = mr.end();
6757        next_addr < end_addr; next_addr = last_addr) {
6758     MemRegion dirty_region = getAndClearMarkedRegion(next_addr, end_addr);
6759     last_addr = dirty_region.end();
6760     if (!dirty_region.is_empty()) {
6761       cl->do_MemRegion(dirty_region);
6762     } else {
6763       assert(last_addr == end_addr, "program logic");
6764       return;
6765     }
6766   }
6767 }
6768 
6769 void CMSBitMap::print_on_error(outputStream* st, const char* prefix) const {
6770   _bm.print_on_error(st, prefix);
6771 }
6772 
6773 #ifndef PRODUCT
6774 void CMSBitMap::assert_locked() const {
6775   CMSLockVerifier::assert_locked(lock());
6776 }
6777 
6778 bool CMSBitMap::covers(MemRegion mr) const {
6779   // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
6780   assert((size_t)_bm.size() == (_bmWordSize >> _shifter),
6781          "size inconsistency");
6782   return (mr.start() >= _bmStartWord) &&
6783          (mr.end()   <= endWord());
6784 }
6785 
6786 bool CMSBitMap::covers(HeapWord* start, size_t size) const {
6787     return (start >= _bmStartWord && (start + size) <= endWord());
6788 }
6789 
6790 void CMSBitMap::verifyNoOneBitsInRange(HeapWord* left, HeapWord* right) {
6791   // verify that there are no 1 bits in the interval [left, right)
6792   FalseBitMapClosure falseBitMapClosure;
6793   iterate(&falseBitMapClosure, left, right);
6794 }
6795 
6796 void CMSBitMap::region_invariant(MemRegion mr)
6797 {
6798   assert_locked();
6799   // mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
6800   assert(!mr.is_empty(), "unexpected empty region");
6801   assert(covers(mr), "mr should be covered by bit map");
6802   // convert address range into offset range
6803   size_t start_ofs = heapWordToOffset(mr.start());
6804   // Make sure that end() is appropriately aligned
6805   assert(mr.end() == (HeapWord*)round_to((intptr_t)mr.end(),
6806                         (1 << (_shifter+LogHeapWordSize))),
6807          "Misaligned mr.end()");
6808   size_t end_ofs   = heapWordToOffset(mr.end());
6809   assert(end_ofs > start_ofs, "Should mark at least one bit");
6810 }
6811 
6812 #endif
6813 
6814 bool CMSMarkStack::allocate(size_t size) {
6815   // allocate a stack of the requisite depth
6816   ReservedSpace rs(ReservedSpace::allocation_align_size_up(
6817                    size * sizeof(oop)));
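       // Rough sizing (illustrative): a requested depth of 32768 entries on a
       // 64-bit VM reserves 32768 * 8 = 256 KB here, rounded up to the
       // allocation alignment.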
6818   if (!rs.is_reserved()) {
6819     warning("CMSMarkStack allocation failure");
6820     return false;
6821   }
6822   if (!_virtual_space.initialize(rs, rs.size())) {
6823     warning("CMSMarkStack backing store failure");
6824     return false;
6825   }
6826   assert(_virtual_space.committed_size() == rs.size(),
6827          "didn't reserve backing store for all of CMS stack?");
6828   _base = (oop*)(_virtual_space.low());
6829   _index = 0;
6830   _capacity = size;
6831   NOT_PRODUCT(_max_depth = 0);
6832   return true;
6833 }
6834 
6835 // XXX FIX ME !!! In the MT case we come in here holding a
6836 // leaf lock. For printing we need to take a further lock
6837 // which has lower rank. We need to recalibrate the two
6838 // lock-ranks involved in order to be able to print the
6839 // messages below. (Or defer the printing to the caller.
6840 // For now we take the expedient path of just disabling the
6841 // messages for the problematic case.)
6842 void CMSMarkStack::expand() {
6843   assert(_capacity <= MarkStackSizeMax, "stack bigger than permitted");
6844   if (_capacity == MarkStackSizeMax) {
6845     if (_hit_limit++ == 0 && !CMSConcurrentMTEnabled && PrintGCDetails) {
6846       // We print a warning message only once per CMS cycle.
6847       gclog_or_tty->print_cr(" (benign) Hit CMSMarkStack max size limit");
6848     }
6849     return;
6850   }
6851   // Double capacity if possible
6852   size_t new_capacity = MIN2(_capacity*2, MarkStackSizeMax);
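       // For example, a 32K-entry stack would grow to 64K entries here
       // (512 KB of backing store on a 64-bit VM), unless capped by
       // MarkStackSizeMax.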
6853   // Do not give up existing stack until we have managed to
6854   // get the double capacity that we desired.
6855   ReservedSpace rs(ReservedSpace::allocation_align_size_up(
6856                    new_capacity * sizeof(oop)));
6857   if (rs.is_reserved()) {
6858     // Release the backing store associated with old stack
6859     _virtual_space.release();
6860     // Reinitialize virtual space for new stack
6861     if (!_virtual_space.initialize(rs, rs.size())) {
6862       fatal("Not enough swap for expanded marking stack");
6863     }
6864     _base = (oop*)(_virtual_space.low());
6865     _index = 0;
6866     _capacity = new_capacity;
6867   } else if (_failed_double++ == 0 && !CMSConcurrentMTEnabled && PrintGCDetails) {
6868     // Failed to double capacity; continue.
6869     // We print a detail message only once per CMS cycle.
6870     gclog_or_tty->print(" (benign) Failed to expand marking stack from "SIZE_FORMAT"K to "
6871             SIZE_FORMAT"K",
6872             _capacity / K, new_capacity / K);
6873   }
6874 }
6875 
6876 
6877 // Closures
6878 // XXX: there seems to be a lot of code duplication here;
6879 // should refactor and consolidate common code.
6880 
6881 // This closure is used to mark refs into the CMS generation in
6882 // the CMS bit map. Called at the first checkpoint. This closure
6883 // assumes that we do not need to re-mark dirty cards; if the CMS
6884 // generation on which this is used is not the oldest
6885 // generation then this will lose younger_gen cards!
6886 
6887 MarkRefsIntoClosure::MarkRefsIntoClosure(
6888   MemRegion span, CMSBitMap* bitMap):
6889     _span(span),
6890     _bitMap(bitMap)
6891 {
6892     assert(_ref_processor == NULL, "deliberately left NULL");
6893     assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
6894 }
6895 
6896 void MarkRefsIntoClosure::do_oop(oop obj) {
6897   // if p points into _span, then mark corresponding bit in _markBitMap
6898   assert(obj->is_oop(), "expected an oop");
6899   HeapWord* addr = (HeapWord*)obj;
6900   if (_span.contains(addr)) {
6901     // this should be made more efficient
6902     _bitMap->mark(addr);
6903   }
6904 }
6905 
6906 void MarkRefsIntoClosure::do_oop(oop* p)       { MarkRefsIntoClosure::do_oop_work(p); }
6907 void MarkRefsIntoClosure::do_oop(narrowOop* p) { MarkRefsIntoClosure::do_oop_work(p); }
6908 
6909 Par_MarkRefsIntoClosure::Par_MarkRefsIntoClosure(
6910   MemRegion span, CMSBitMap* bitMap):
6911     _span(span),
6912     _bitMap(bitMap)
6913 {
6914     assert(_ref_processor == NULL, "deliberately left NULL");
6915     assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
6916 }
6917 
6918 void Par_MarkRefsIntoClosure::do_oop(oop obj) {
6919   // if p points into _span, then mark corresponding bit in _markBitMap
6920   assert(obj->is_oop(), "expected an oop");
6921   HeapWord* addr = (HeapWord*)obj;
6922   if (_span.contains(addr)) {
6923     // this should be made more efficient
6924     _bitMap->par_mark(addr);
6925   }
6926 }
6927 
6928 void Par_MarkRefsIntoClosure::do_oop(oop* p)       { Par_MarkRefsIntoClosure::do_oop_work(p); }
6929 void Par_MarkRefsIntoClosure::do_oop(narrowOop* p) { Par_MarkRefsIntoClosure::do_oop_work(p); }
6930 
6931 // A variant of the above, used for CMS marking verification.
6932 MarkRefsIntoVerifyClosure::MarkRefsIntoVerifyClosure(
6933   MemRegion span, CMSBitMap* verification_bm, CMSBitMap* cms_bm):
6934     _span(span),
6935     _verification_bm(verification_bm),
6936     _cms_bm(cms_bm)
6937 {
6938     assert(_ref_processor == NULL, "deliberately left NULL");
6939     assert(_verification_bm->covers(_span), "_verification_bm/_span mismatch");
6940 }
6941 
6942 void MarkRefsIntoVerifyClosure::do_oop(oop obj) {
6943   // if p points into _span, then mark corresponding bit in _markBitMap
6944   assert(obj->is_oop(), "expected an oop");
6945   HeapWord* addr = (HeapWord*)obj;
6946   if (_span.contains(addr)) {
6947     _verification_bm->mark(addr);
6948     if (!_cms_bm->isMarked(addr)) {
6949       oop(addr)->print();
6950       gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)", addr);
6951       fatal("... aborting");
6952     }
6953   }
6954 }
6955 
6956 void MarkRefsIntoVerifyClosure::do_oop(oop* p)       { MarkRefsIntoVerifyClosure::do_oop_work(p); }
6957 void MarkRefsIntoVerifyClosure::do_oop(narrowOop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); }
6958 
6959 //////////////////////////////////////////////////
6960 // MarkRefsIntoAndScanClosure
6961 //////////////////////////////////////////////////
6962 
6963 MarkRefsIntoAndScanClosure::MarkRefsIntoAndScanClosure(MemRegion span,
6964                                                        ReferenceProcessor* rp,
6965                                                        CMSBitMap* bit_map,
6966                                                        CMSBitMap* mod_union_table,
6967                                                        CMSMarkStack*  mark_stack,
6968                                                        CMSCollector* collector,
6969                                                        bool should_yield,
6970                                                        bool concurrent_precleaning):
6971   _collector(collector),
6972   _span(span),
6973   _bit_map(bit_map),
6974   _mark_stack(mark_stack),
6975   _pushAndMarkClosure(collector, span, rp, bit_map, mod_union_table,
6976                       mark_stack, concurrent_precleaning),
6977   _yield(should_yield),
6978   _concurrent_precleaning(concurrent_precleaning),
6979   _freelistLock(NULL)
6980 {
6981   _ref_processor = rp;
6982   assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
6983 }
6984 
6985 // This closure is used to mark refs into the CMS generation at the
6986 // second (final) checkpoint, and to scan and transitively follow
6987 // the unmarked oops. It is also used during the concurrent precleaning
6988 // phase while scanning objects on dirty cards in the CMS generation.
6989 // The marks are made in the marking bit map and the marking stack is
6990 // used for keeping the (newly) grey objects during the scan.
6991 // The parallel version (Par_...) appears further below.
6992 void MarkRefsIntoAndScanClosure::do_oop(oop obj) {
6993   if (obj != NULL) {
6994     assert(obj->is_oop(), "expected an oop");
6995     HeapWord* addr = (HeapWord*)obj;
6996     assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
6997     assert(_collector->overflow_list_is_empty(),
6998            "overflow list should be empty");
6999     if (_span.contains(addr) &&
7000         !_bit_map->isMarked(addr)) {
7001       // mark bit map (object is now grey)
7002       _bit_map->mark(addr);
7003       // push on marking stack (stack should be empty), and drain the
7004       // stack by applying this closure to the oops in the oops popped
7005       // from the stack (i.e. blacken the grey objects)
7006       bool res = _mark_stack->push(obj);
7007       assert(res, "Should have space to push on empty stack");
7008       do {
7009         oop new_oop = _mark_stack->pop();
7010         assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
7011         assert(_bit_map->isMarked((HeapWord*)new_oop),
7012                "only grey objects on this stack");
7013         // iterate over the oops in this oop, marking and pushing
7014         // the ones in CMS heap (i.e. in _span).
7015         new_oop->oop_iterate(&_pushAndMarkClosure);
7016         // check if it's time to yield
7017         do_yield_check();
7018       } while (!_mark_stack->isEmpty() ||
7019                (!_concurrent_precleaning && take_from_overflow_list()));
7020         // if marking stack is empty, and we are not doing this
7021         // during precleaning, then check the overflow list
7022     }
7023     assert(_mark_stack->isEmpty(), "post-condition (eager drainage)");
7024     assert(_collector->overflow_list_is_empty(),
7025            "overflow list was drained above");
7026     // We could restore evacuated mark words, if any, used for
7027     // overflow list links here because the overflow list is
7028     // provably empty here. That would reduce the maximum
7029     // size requirements for preserved_{oop,mark}_stack.
7030     // But we'll just postpone it until we are all done
7031     // so we can just stream through.
7032     if (!_concurrent_precleaning && CMSOverflowEarlyRestoration) {
7033       _collector->restore_preserved_marks_if_any();
7034       assert(_collector->no_preserved_marks(), "No preserved marks");
7035     }
7036     assert(!CMSOverflowEarlyRestoration || _collector->no_preserved_marks(),
7037            "All preserved marks should have been restored above");
7038   }
7039 }
7040 
7041 void MarkRefsIntoAndScanClosure::do_oop(oop* p)       { MarkRefsIntoAndScanClosure::do_oop_work(p); }
7042 void MarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
7043 
7044 void MarkRefsIntoAndScanClosure::do_yield_work() {
7045   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
7046          "CMS thread should hold CMS token");
7047   assert_lock_strong(_freelistLock);
7048   assert_lock_strong(_bit_map->lock());
7049   // relinquish the free_list_lock and bitMapLock()
7050   _bit_map->lock()->unlock();
7051   _freelistLock->unlock();
7052   ConcurrentMarkSweepThread::desynchronize(true);
7053   ConcurrentMarkSweepThread::acknowledge_yield_request();
7054   _collector->stopTimer();
7055   GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
7056   if (PrintCMSStatistics != 0) {
7057     _collector->incrementYields();
7058   }
7059   _collector->icms_wait();
7060 
7061   // See the comment in coordinator_yield()
7062   for (unsigned i = 0;
7063        i < CMSYieldSleepCount &&
7064        ConcurrentMarkSweepThread::should_yield() &&
7065        !CMSCollector::foregroundGCIsActive();
7066        ++i) {
7067     os::sleep(Thread::current(), 1, false);
7068     ConcurrentMarkSweepThread::acknowledge_yield_request();
7069   }
7070 
7071   ConcurrentMarkSweepThread::synchronize(true);
7072   _freelistLock->lock_without_safepoint_check();
7073   _bit_map->lock()->lock_without_safepoint_check();
7074   _collector->startTimer();
7075 }
7076 
7077 ///////////////////////////////////////////////////////////
7078 // Par_MarkRefsIntoAndScanClosure: a parallel version of
7079 //                                 MarkRefsIntoAndScanClosure
7080 ///////////////////////////////////////////////////////////
7081 Par_MarkRefsIntoAndScanClosure::Par_MarkRefsIntoAndScanClosure(
7082   CMSCollector* collector, MemRegion span, ReferenceProcessor* rp,
7083   CMSBitMap* bit_map, OopTaskQueue* work_queue):
7084   _span(span),
7085   _bit_map(bit_map),
7086   _work_queue(work_queue),
7087   _low_water_mark(MIN2((uint)(work_queue->max_elems()/4),
7088                        (uint)(CMSWorkQueueDrainThreshold * ParallelGCThreads))),
7089   _par_pushAndMarkClosure(collector, span, rp, bit_map, work_queue)
7090 {
7091   _ref_processor = rp;
7092   assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
7093 }
7094 
7095 // This closure is used to mark refs into the CMS generation at the
7096 // second (final) checkpoint, and to scan and transitively follow
7097 // the unmarked oops. The marks are made in the marking bit map and
7098 // the work_queue is used for keeping the (newly) grey objects during
7099 // the scan phase whence they are also available for stealing by parallel
7100 // threads. Since the marking bit map is shared, updates are
7101 // synchronized (via CAS).
7102 void Par_MarkRefsIntoAndScanClosure::do_oop(oop obj) {
7103   if (obj != NULL) {
7104     // Ignore mark word because this could be an already marked oop
7105     // that may be chained at the end of the overflow list.
7106     assert(obj->is_oop(true), "expected an oop");
7107     HeapWord* addr = (HeapWord*)obj;
7108     if (_span.contains(addr) &&
7109         !_bit_map->isMarked(addr)) {
7110       // mark bit map (object will become grey):
7111       // It is possible for several threads to be
7112       // trying to "claim" this object concurrently;
7113       // the unique thread that succeeds in marking the
7114       // object first will do the subsequent push on
7115       // to the work queue (or overflow list).
7116       if (_bit_map->par_mark(addr)) {
7117         // push on work_queue (which may not be empty), and trim the
7118         // queue to an appropriate length by applying this closure to
7119         // the oops in the oops popped from the stack (i.e. blacken the
7120         // grey objects)
7121         bool res = _work_queue->push(obj);
7122         assert(res, "Low water mark should be less than capacity?");
7123         trim_queue(_low_water_mark);
7124       } // Else, another thread claimed the object
7125     }
7126   }
7127 }
7128 
7129 void Par_MarkRefsIntoAndScanClosure::do_oop(oop* p)       { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
7130 void Par_MarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
7131 
7132 // This closure is used to rescan the marked objects on the dirty cards
7133 // in the mod union table and the card table proper.
7134 size_t ScanMarkedObjectsAgainCarefullyClosure::do_object_careful_m(
7135   oop p, MemRegion mr) {
7136 
7137   size_t size = 0;
7138   HeapWord* addr = (HeapWord*)p;
7139   DEBUG_ONLY(_collector->verify_work_stacks_empty();)
7140   assert(_span.contains(addr), "we are scanning the CMS generation");
7141   // check if it's time to yield
7142   if (do_yield_check()) {
7143     // We yielded for some foreground stop-world work,
7144     // and we have been asked to abort this ongoing preclean cycle.
7145     return 0;
7146   }
7147   if (_bitMap->isMarked(addr)) {
7148     // it's marked; is it potentially uninitialized?
7149     if (p->klass_or_null() != NULL) {
7150         // an initialized object; ignore mark word in verification below
7151         // since we are running concurrent with mutators
7152         assert(p->is_oop(true), "should be an oop");
7153         if (p->is_objArray()) {
7154           // objArrays are precisely marked; restrict scanning
7155           // to dirty cards only.
7156           size = CompactibleFreeListSpace::adjustObjectSize(
7157                    p->oop_iterate(_scanningClosure, mr));
7158         } else {
7159           // A non-array may have been imprecisely marked; we need
7160           // to scan the object in its entirety.
7161           size = CompactibleFreeListSpace::adjustObjectSize(
7162                    p->oop_iterate(_scanningClosure));
7163         }
7164         #ifdef ASSERT
7165           size_t direct_size =
7166             CompactibleFreeListSpace::adjustObjectSize(p->size());
7167           assert(size == direct_size, "Inconsistency in size");
7168           assert(size >= 3, "Necessary for Printezis marks to work");
7169           if (!_bitMap->isMarked(addr+1)) {
7170             _bitMap->verifyNoOneBitsInRange(addr+2, addr+size);
7171           } else {
7172             _bitMap->verifyNoOneBitsInRange(addr+2, addr+size-1);
7173             assert(_bitMap->isMarked(addr+size-1),
7174                    "inconsistent Printezis mark");
7175           }
7176         #endif // ASSERT
7177     } else {
7178       // an uninitialized object
7179       assert(_bitMap->isMarked(addr+1), "missing Printezis mark?");
7180       HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
7181       size = pointer_delta(nextOneAddr + 1, addr);
7182       assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
7183              "alignment problem");
7184       // Note that pre-cleaning needn't redirty the card. OopDesc::set_klass()
7185       // will dirty the card when the klass pointer is installed in the
7186       // object (signalling the completion of initialization).
7187     }
7188   } else {
7189     // Either a not yet marked object or an uninitialized object
7190     if (p->klass_or_null() == NULL) {
7191       // An uninitialized object, skip to the next card, since
7192       // we may not be able to read its P-bits yet.
7193       assert(size == 0, "Initial value");
7194     } else {
7195       // An object not (yet) reached by marking: we merely need to
7196       // compute its size so as to go look at the next block.
7197       assert(p->is_oop(true), "should be an oop");
7198       size = CompactibleFreeListSpace::adjustObjectSize(p->size());
7199     }
7200   }
7201   DEBUG_ONLY(_collector->verify_work_stacks_empty();)
7202   return size;
7203 }
7204 
7205 void ScanMarkedObjectsAgainCarefullyClosure::do_yield_work() {
7206   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
7207          "CMS thread should hold CMS token");
7208   assert_lock_strong(_freelistLock);
7209   assert_lock_strong(_bitMap->lock());
7210   // relinquish the free_list_lock and bitMapLock()
7211   _bitMap->lock()->unlock();
7212   _freelistLock->unlock();
7213   ConcurrentMarkSweepThread::desynchronize(true);
7214   ConcurrentMarkSweepThread::acknowledge_yield_request();
7215   _collector->stopTimer();
7216   GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
7217   if (PrintCMSStatistics != 0) {
7218     _collector->incrementYields();
7219   }
7220   _collector->icms_wait();
7221 
7222   // See the comment in coordinator_yield()
7223   for (unsigned i = 0; i < CMSYieldSleepCount &&
7224                    ConcurrentMarkSweepThread::should_yield() &&
7225                    !CMSCollector::foregroundGCIsActive(); ++i) {
7226     os::sleep(Thread::current(), 1, false);
7227     ConcurrentMarkSweepThread::acknowledge_yield_request();
7228   }
7229 
7230   ConcurrentMarkSweepThread::synchronize(true);
7231   _freelistLock->lock_without_safepoint_check();
7232   _bitMap->lock()->lock_without_safepoint_check();
7233   _collector->startTimer();
7234 }
7235 
7236 
7237 //////////////////////////////////////////////////////////////////
7238 // SurvivorSpacePrecleanClosure
7239 //////////////////////////////////////////////////////////////////
7240 // This (single-threaded) closure is used to preclean the oops in
7241 // the survivor spaces.
7242 size_t SurvivorSpacePrecleanClosure::do_object_careful(oop p) {
7243 
7244   HeapWord* addr = (HeapWord*)p;
7245   DEBUG_ONLY(_collector->verify_work_stacks_empty();)
7246   assert(!_span.contains(addr), "we are scanning the survivor spaces");
7247   assert(p->klass_or_null() != NULL, "object should be initialized");
7248   // an initialized object; ignore mark word in verification below
7249   // since we are running concurrent with mutators
7250   assert(p->is_oop(true), "should be an oop");
7251   // Note that we do not yield while we iterate over
7252   // the interior oops of p, pushing the relevant ones
7253   // on our marking stack.
7254   size_t size = p->oop_iterate(_scanning_closure);
7255   do_yield_check();
7256   // Observe that below, we do not abandon the preclean
7257   // phase as soon as we should; rather we empty the
7258   // marking stack before returning. This is to satisfy
7259   // some existing assertions. In general, it may be a
7260   // good idea to abort immediately and complete the marking
7261   // from the grey objects at a later time.
7262   while (!_mark_stack->isEmpty()) {
7263     oop new_oop = _mark_stack->pop();
7264     assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
7265     assert(_bit_map->isMarked((HeapWord*)new_oop),
7266            "only grey objects on this stack");
7267     // iterate over the oops in this oop, marking and pushing
7268     // the ones in CMS heap (i.e. in _span).
7269     new_oop->oop_iterate(_scanning_closure);
7270     // check if it's time to yield
7271     do_yield_check();
7272   }
7273   unsigned int after_count =
7274     GenCollectedHeap::heap()->total_collections();
7275   bool abort = (_before_count != after_count) ||
7276                _collector->should_abort_preclean();
7277   return abort ? 0 : size;
7278 }
7279 
7280 void SurvivorSpacePrecleanClosure::do_yield_work() {
7281   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
7282          "CMS thread should hold CMS token");
7283   assert_lock_strong(_bit_map->lock());
7284   // Relinquish the bit map lock
7285   _bit_map->lock()->unlock();
7286   ConcurrentMarkSweepThread::desynchronize(true);
7287   ConcurrentMarkSweepThread::acknowledge_yield_request();
7288   _collector->stopTimer();
7289   GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
7290   if (PrintCMSStatistics != 0) {
7291     _collector->incrementYields();
7292   }
7293   _collector->icms_wait();
7294 
7295   // See the comment in coordinator_yield()
7296   for (unsigned i = 0; i < CMSYieldSleepCount &&
7297                        ConcurrentMarkSweepThread::should_yield() &&
7298                        !CMSCollector::foregroundGCIsActive(); ++i) {
7299     os::sleep(Thread::current(), 1, false);
7300     ConcurrentMarkSweepThread::acknowledge_yield_request();
7301   }
7302 
7303   ConcurrentMarkSweepThread::synchronize(true);
7304   _bit_map->lock()->lock_without_safepoint_check();
7305   _collector->startTimer();
7306 }
7307 
7308 // This closure is used to rescan the marked objects on the dirty cards
7309 // in the mod union table and the card table proper. In the parallel
7310 // case, although the bitMap is shared, we do a single read so the
7311 // isMarked() query is "safe".
7312 bool ScanMarkedObjectsAgainClosure::do_object_bm(oop p, MemRegion mr) {
7313   // Ignore mark word because we are running concurrent with mutators
7314   assert(p->is_oop_or_null(true), "expected an oop or null");
7315   HeapWord* addr = (HeapWord*)p;
7316   assert(_span.contains(addr), "we are scanning the CMS generation");
7317   bool is_obj_array = false;
7318   #ifdef ASSERT
7319     if (!_parallel) {
7320       assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
7321       assert(_collector->overflow_list_is_empty(),
7322              "overflow list should be empty");
7323 
7324     }
7325   #endif // ASSERT
7326   if (_bit_map->isMarked(addr)) {
7327     // Obj arrays are precisely marked, non-arrays are not;
7328     // so we scan objArrays precisely and non-arrays in their
7329     // entirety.
7330     if (p->is_objArray()) {
7331       is_obj_array = true;
7332       if (_parallel) {
7333         p->oop_iterate(_par_scan_closure, mr);
7334       } else {
7335         p->oop_iterate(_scan_closure, mr);
7336       }
7337     } else {
7338       if (_parallel) {
7339         p->oop_iterate(_par_scan_closure);
7340       } else {
7341         p->oop_iterate(_scan_closure);
7342       }
7343     }
7344   }
7345   #ifdef ASSERT
7346     if (!_parallel) {
7347       assert(_mark_stack->isEmpty(), "post-condition (eager drainage)");
7348       assert(_collector->overflow_list_is_empty(),
7349              "overflow list should be empty");
7350 
7351     }
7352   #endif // ASSERT
7353   return is_obj_array;
7354 }
7355 
7356 MarkFromRootsClosure::MarkFromRootsClosure(CMSCollector* collector,
7357                         MemRegion span,
7358                         CMSBitMap* bitMap, CMSMarkStack*  markStack,
7359                         bool should_yield, bool verifying):
7360   _collector(collector),
7361   _span(span),
7362   _bitMap(bitMap),
7363   _mut(&collector->_modUnionTable),
7364   _markStack(markStack),
7365   _yield(should_yield),
7366   _skipBits(0)
7367 {
7368   assert(_markStack->isEmpty(), "stack should be empty");
7369   _finger = _bitMap->startWord();
7370   _threshold = _finger;
7371   assert(_collector->_restart_addr == NULL, "Sanity check");
7372   assert(_span.contains(_finger), "Out of bounds _finger?");
7373   DEBUG_ONLY(_verifying = verifying;)
7374 }
7375 
7376 void MarkFromRootsClosure::reset(HeapWord* addr) {
7377   assert(_markStack->isEmpty(), "would cause duplicates on stack");
7378   assert(_span.contains(addr), "Out of bounds _finger?");
7379   _finger = addr;
7380   _threshold = (HeapWord*)round_to(
7381                  (intptr_t)_finger, CardTableModRefBS::card_size);
7382 }
7383 
7384 // Should revisit to see if this should be restructured for
7385 // greater efficiency.
7386 bool MarkFromRootsClosure::do_bit(size_t offset) {
7387   if (_skipBits > 0) {
7388     _skipBits--;
7389     return true;
7390   }
7391   // convert offset into a HeapWord*
7392   HeapWord* addr = _bitMap->startWord() + offset;
7393   assert(_bitMap->endWord() && addr < _bitMap->endWord(),
7394          "address out of range");
7395   assert(_bitMap->isMarked(addr), "tautology");
7396   if (_bitMap->isMarked(addr+1)) {
7397     // this is an allocated but not yet initialized object
7398     assert(_skipBits == 0, "tautology");
7399     _skipBits = 2;  // skip next two marked bits ("Printezis-marks")
7400     oop p = oop(addr);
7401     if (p->klass_or_null() == NULL) {
7402       DEBUG_ONLY(if (!_verifying) {)
7403         // We re-dirty the cards on which this object lies and increase
7404         // the _threshold so that we'll come back to scan this object
7405         // during the preclean or remark phase. (CMSCleanOnEnter)
7406         if (CMSCleanOnEnter) {
7407           size_t sz = _collector->block_size_using_printezis_bits(addr);
7408           HeapWord* end_card_addr   = (HeapWord*)round_to(
7409                                          (intptr_t)(addr+sz), CardTableModRefBS::card_size);
7410           MemRegion redirty_range = MemRegion(addr, end_card_addr);
7411           assert(!redirty_range.is_empty(), "Arithmetical tautology");
7412           // Bump _threshold to end_card_addr; note that
7413           // _threshold cannot possibly exceed end_card_addr, anyhow.
7414           // This prevents future clearing of the card as the scan proceeds
7415           // to the right.
7416           assert(_threshold <= end_card_addr,
7417                  "Because we are just scanning into this object");
7418           if (_threshold < end_card_addr) {
7419             _threshold = end_card_addr;
7420           }
7421           if (p->klass_or_null() != NULL) {
7422             // Redirty the range of cards...
7423             _mut->mark_range(redirty_range);
7424           } // ...else the setting of klass will dirty the card anyway.
7425         }
7426       DEBUG_ONLY(})
7427       return true;
7428     }
7429   }
7430   scanOopsInOop(addr);
7431   return true;
7432 }
7433 
7434 // We take a break if we've been at this for a while,
7435 // so as to avoid monopolizing the locks involved.
7436 void MarkFromRootsClosure::do_yield_work() {
7437   // First give up the locks, then yield, then re-lock
7438   // We should probably use a constructor/destructor idiom to
7439   // do this unlock/lock or modify the MutexUnlocker class to
7440   // serve our purpose. XXX
7441   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
7442          "CMS thread should hold CMS token");
7443   assert_lock_strong(_bitMap->lock());
7444   _bitMap->lock()->unlock();
7445   ConcurrentMarkSweepThread::desynchronize(true);
7446   ConcurrentMarkSweepThread::acknowledge_yield_request();
7447   _collector->stopTimer();
7448   GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
7449   if (PrintCMSStatistics != 0) {
7450     _collector->incrementYields();
7451   }
7452   _collector->icms_wait();
7453 
7454   // See the comment in coordinator_yield()
7455   for (unsigned i = 0; i < CMSYieldSleepCount &&
7456                        ConcurrentMarkSweepThread::should_yield() &&
7457                        !CMSCollector::foregroundGCIsActive(); ++i) {
7458     os::sleep(Thread::current(), 1, false);
7459     ConcurrentMarkSweepThread::acknowledge_yield_request();
7460   }
7461 
7462   ConcurrentMarkSweepThread::synchronize(true);
7463   _bitMap->lock()->lock_without_safepoint_check();
7464   _collector->startTimer();
7465 }
7466 
7467 void MarkFromRootsClosure::scanOopsInOop(HeapWord* ptr) {
7468   assert(_bitMap->isMarked(ptr), "expected bit to be set");
7469   assert(_markStack->isEmpty(),
7470          "should drain stack to limit stack usage");
7471   // convert ptr to an oop preparatory to scanning
7472   oop obj = oop(ptr);
7473   // Ignore mark word in verification below, since we
7474   // may be running concurrent with mutators.
7475   assert(obj->is_oop(true), "should be an oop");
7476   assert(_finger <= ptr, "_finger runneth ahead");
7477   // advance the finger to right end of this object
7478   _finger = ptr + obj->size();
7479   assert(_finger > ptr, "we just incremented it above");
7480   // On large heaps, it may take us some time to get through
7481   // the marking phase (especially if running iCMS). During
7482   // this time it's possible that a lot of mutations have
7483   // accumulated in the card table and the mod union table --
7484   // these mutation records are redundant until we have
7485   // actually traced into the corresponding card.
7486   // Here, we check whether advancing the finger would make
7487   // us cross into a new card, and if so clear corresponding
7488   // cards in the MUT (preclean them in the card-table in the
7489   // future).
7490 
7491   DEBUG_ONLY(if (!_verifying) {)
7492     // The clean-on-enter optimization is disabled by default,
7493     // until we fix 6178663.
7494     if (CMSCleanOnEnter && (_finger > _threshold)) {
7495       // [_threshold, _finger) represents the interval
7496       // of cards to be cleared  in MUT (or precleaned in card table).
7497       // The set of cards to be cleared is all those that overlap
7498       // with the interval [_threshold, _finger); note that
7499       // _threshold is always kept card-aligned but _finger isn't
7500       // always card-aligned.
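           // Concrete illustration (assuming the usual 512-byte cards): if
           // old_threshold is 0x2000 and the finger has advanced to 0x2350,
           // the new _threshold below becomes 0x2400 and the MUT range
           // [0x2000, 0x2400) is cleared.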
7501       HeapWord* old_threshold = _threshold;
7502       assert(old_threshold == (HeapWord*)round_to(
7503               (intptr_t)old_threshold, CardTableModRefBS::card_size),
7504              "_threshold should always be card-aligned");
7505       _threshold = (HeapWord*)round_to(
7506                      (intptr_t)_finger, CardTableModRefBS::card_size);
7507       MemRegion mr(old_threshold, _threshold);
7508       assert(!mr.is_empty(), "Control point invariant");
7509       assert(_span.contains(mr), "Should clear within span");
7510       _mut->clear_range(mr);
7511     }
7512   DEBUG_ONLY(})
7513   // Note: the finger doesn't advance while we drain
7514   // the stack below.
7515   PushOrMarkClosure pushOrMarkClosure(_collector,
7516                                       _span, _bitMap, _markStack,
7517                                       _finger, this);
7518   bool res = _markStack->push(obj);
7519   assert(res, "Empty non-zero size stack should have space for single push");
7520   while (!_markStack->isEmpty()) {
7521     oop new_oop = _markStack->pop();
7522     // Skip verifying header mark word below because we are
7523     // running concurrent with mutators.
7524     assert(new_oop->is_oop(true), "Oops! expected to pop an oop");
7525     // now scan this oop's oops
7526     new_oop->oop_iterate(&pushOrMarkClosure);
7527     do_yield_check();
7528   }
7529   assert(_markStack->isEmpty(), "tautology, emphasizing post-condition");
7530 }
7531 
7532 Par_MarkFromRootsClosure::Par_MarkFromRootsClosure(CMSConcMarkingTask* task,
7533                        CMSCollector* collector, MemRegion span,
7534                        CMSBitMap* bit_map,
7535                        OopTaskQueue* work_queue,
7536                        CMSMarkStack*  overflow_stack,
7537                        bool should_yield):
7538   _collector(collector),
7539   _whole_span(collector->_span),
7540   _span(span),
7541   _bit_map(bit_map),
7542   _mut(&collector->_modUnionTable),
7543   _work_queue(work_queue),
7544   _overflow_stack(overflow_stack),
7545   _yield(should_yield),
7546   _skip_bits(0),
7547   _task(task)
7548 {
7549   assert(_work_queue->size() == 0, "work_queue should be empty");
7550   _finger = span.start();
7551   _threshold = _finger;     // XXX Defer clear-on-enter optimization for now
7552   assert(_span.contains(_finger), "Out of bounds _finger?");
7553 }
7554 
7555 // Should revisit to see if this should be restructured for
7556 // greater efficiency.
7557 bool Par_MarkFromRootsClosure::do_bit(size_t offset) {
7558   if (_skip_bits > 0) {
7559     _skip_bits--;
7560     return true;
7561   }
7562   // convert offset into a HeapWord*
7563   HeapWord* addr = _bit_map->startWord() + offset;
7564   assert(_bit_map->endWord() && addr < _bit_map->endWord(),
7565          "address out of range");
7566   assert(_bit_map->isMarked(addr), "tautology");
7567   if (_bit_map->isMarked(addr+1)) {
7568     // this is an allocated object that might not yet be initialized
7569     assert(_skip_bits == 0, "tautology");
7570     _skip_bits = 2;  // skip next two marked bits ("Printezis-marks")
7571     oop p = oop(addr);
7572     if (p->klass_or_null() == NULL) {
7573       // in the case of Clean-on-Enter optimization, redirty card
7574       // and avoid clearing card by increasing  the threshold.
7575       return true;
7576     }
7577   }
7578   scan_oops_in_oop(addr);
7579   return true;
7580 }
7581 
7582 void Par_MarkFromRootsClosure::scan_oops_in_oop(HeapWord* ptr) {
7583   assert(_bit_map->isMarked(ptr), "expected bit to be set");
7584   // Should we assert that our work queue is empty or
7585   // below some drain limit?
7586   assert(_work_queue->size() == 0,
7587          "should drain stack to limit stack usage");
7588   // convert ptr to an oop preparatory to scanning
7589   oop obj = oop(ptr);
7590   // Ignore mark word in verification below, since we
7591   // may be running concurrent with mutators.
7592   assert(obj->is_oop(true), "should be an oop");
7593   assert(_finger <= ptr, "_finger runneth ahead");
7594   // advance the finger to right end of this object
7595   _finger = ptr + obj->size();
7596   assert(_finger > ptr, "we just incremented it above");
7597   // On large heaps, it may take us some time to get through
7598   // the marking phase (especially if running iCMS). During
7599   // this time it's possible that a lot of mutations have
7600   // accumulated in the card table and the mod union table --
7601   // these mutation records are redundant until we have
7602   // actually traced into the corresponding card.
7603   // Here, we check whether advancing the finger would make
7604   // us cross into a new card, and if so clear corresponding
7605   // cards in the MUT (preclean them in the card-table in the
7606   // future).
7607 
7608   // The clean-on-enter optimization is disabled by default,
7609   // until we fix 6178663.
7610   if (CMSCleanOnEnter && (_finger > _threshold)) {
7611     // [_threshold, _finger) represents the interval
7612     // of cards to be cleared  in MUT (or precleaned in card table).
7613     // The set of cards to be cleared is all those that overlap
7614     // with the interval [_threshold, _finger); note that
7615     // _threshold is always kept card-aligned but _finger isn't
7616     // always card-aligned.
7617     HeapWord* old_threshold = _threshold;
7618     assert(old_threshold == (HeapWord*)round_to(
7619             (intptr_t)old_threshold, CardTableModRefBS::card_size),
7620            "_threshold should always be card-aligned");
7621     _threshold = (HeapWord*)round_to(
7622                    (intptr_t)_finger, CardTableModRefBS::card_size);
7623     MemRegion mr(old_threshold, _threshold);
7624     assert(!mr.is_empty(), "Control point invariant");
7625     assert(_span.contains(mr), "Should clear within span"); // _whole_span ??
7626     _mut->clear_range(mr);
7627   }
7628 
7629   // Note: the local finger doesn't advance while we drain
7630   // the stack below, but the global finger sure can and will.
7631   HeapWord** gfa = _task->global_finger_addr();
7632   Par_PushOrMarkClosure pushOrMarkClosure(_collector,
7633                                       _span, _bit_map,
7634                                       _work_queue,
7635                                       _overflow_stack,
7636                                       _finger,
7637                                       gfa, this);
7638   bool res = _work_queue->push(obj);   // overflow could occur here
7639   assert(res, "Will hold once we use workqueues");
7640   while (true) {
7641     oop new_oop;
7642     if (!_work_queue->pop_local(new_oop)) {
7643       // We emptied our work_queue; check if there's stuff that can
7644       // be gotten from the overflow stack.
7645       if (CMSConcMarkingTask::get_work_from_overflow_stack(
7646             _overflow_stack, _work_queue)) {
7647         do_yield_check();
7648         continue;
7649       } else {  // done
7650         break;
7651       }
7652     }
7653     // Skip verifying header mark word below because we are
7654     // running concurrent with mutators.
7655     assert(new_oop->is_oop(true), "Oops! expected to pop an oop");
7656     // now scan this oop's oops
7657     new_oop->oop_iterate(&pushOrMarkClosure);
7658     do_yield_check();
7659   }
7660   assert(_work_queue->size() == 0, "tautology, emphasizing post-condition");
7661 }
7662 
7663 // Yield in response to a request from VM Thread or
7664 // from mutators.
7665 void Par_MarkFromRootsClosure::do_yield_work() {
7666   assert(_task != NULL, "sanity");
7667   _task->yield();
7668 }
7669 
7670 // A variant of the above used for verifying CMS marking work.
7671 MarkFromRootsVerifyClosure::MarkFromRootsVerifyClosure(CMSCollector* collector,
7672                         MemRegion span,
7673                         CMSBitMap* verification_bm, CMSBitMap* cms_bm,
7674                         CMSMarkStack*  mark_stack):
7675   _collector(collector),
7676   _span(span),
7677   _verification_bm(verification_bm),
7678   _cms_bm(cms_bm),
7679   _mark_stack(mark_stack),
7680   _pam_verify_closure(collector, span, verification_bm, cms_bm,
7681                       mark_stack)
7682 {
7683   assert(_mark_stack->isEmpty(), "stack should be empty");
7684   _finger = _verification_bm->startWord();
7685   assert(_collector->_restart_addr == NULL, "Sanity check");
7686   assert(_span.contains(_finger), "Out of bounds _finger?");
7687 }
7688 
7689 void MarkFromRootsVerifyClosure::reset(HeapWord* addr) {
7690   assert(_mark_stack->isEmpty(), "would cause duplicates on stack");
7691   assert(_span.contains(addr), "Out of bounds _finger?");
7692   _finger = addr;
7693 }
7694 
7695 // Should revisit to see if this should be restructured for
7696 // greater efficiency.
7697 bool MarkFromRootsVerifyClosure::do_bit(size_t offset) {
7698   // convert offset into a HeapWord*
7699   HeapWord* addr = _verification_bm->startWord() + offset;
7700   assert(_verification_bm->endWord() && addr < _verification_bm->endWord(),
7701          "address out of range");
7702   assert(_verification_bm->isMarked(addr), "tautology");
7703   assert(_cms_bm->isMarked(addr), "tautology");
7704 
7705   assert(_mark_stack->isEmpty(),
7706          "should drain stack to limit stack usage");
7707   // convert addr to an oop preparatory to scanning
7708   oop obj = oop(addr);
7709   assert(obj->is_oop(), "should be an oop");
7710   assert(_finger <= addr, "_finger runneth ahead");
7711   // advance the finger to right end of this object
7712   _finger = addr + obj->size();
7713   assert(_finger > addr, "we just incremented it above");
7714   // Note: the finger doesn't advance while we drain
7715   // the stack below.
7716   bool res = _mark_stack->push(obj);
7717   assert(res, "Empty non-zero size stack should have space for single push");
7718   while (!_mark_stack->isEmpty()) {
7719     oop new_oop = _mark_stack->pop();
7720     assert(new_oop->is_oop(), "Oops! expected to pop an oop");
7721     // now scan this oop's oops
7722     new_oop->oop_iterate(&_pam_verify_closure);
7723   }
7724   assert(_mark_stack->isEmpty(), "tautology, emphasizing post-condition");
7725   return true;
7726 }
7727 
7728 PushAndMarkVerifyClosure::PushAndMarkVerifyClosure(
7729   CMSCollector* collector, MemRegion span,
7730   CMSBitMap* verification_bm, CMSBitMap* cms_bm,
7731   CMSMarkStack*  mark_stack):
7732   CMSOopClosure(collector->ref_processor()),
7733   _collector(collector),
7734   _span(span),
7735   _verification_bm(verification_bm),
7736   _cms_bm(cms_bm),
7737   _mark_stack(mark_stack)
7738 { }
7739 
7740 void PushAndMarkVerifyClosure::do_oop(oop* p)       { PushAndMarkVerifyClosure::do_oop_work(p); }
7741 void PushAndMarkVerifyClosure::do_oop(narrowOop* p) { PushAndMarkVerifyClosure::do_oop_work(p); }
7742 
7743 // Upon stack overflow, we discard (part of) the stack,
7744 // remembering the least address amongst those discarded
7745 // in CMSCollector's _restart_addr.
7746 void PushAndMarkVerifyClosure::handle_stack_overflow(HeapWord* lost) {
7747   // Remember the least grey address discarded
7748   HeapWord* ra = (HeapWord*)_mark_stack->least_value(lost);
7749   _collector->lower_restart_addr(ra);
7750   _mark_stack->reset();  // discard stack contents
7751   _mark_stack->expand(); // expand the stack if possible
7752 }
7753 
7754 void PushAndMarkVerifyClosure::do_oop(oop obj) {
7755   assert(obj->is_oop_or_null(), "expected an oop or NULL");
7756   HeapWord* addr = (HeapWord*)obj;
7757   if (_span.contains(addr) && !_verification_bm->isMarked(addr)) {
7758     // Oop lies in _span and isn't yet grey or black
7759     _verification_bm->mark(addr);            // now grey
7760     if (!_cms_bm->isMarked(addr)) {
7761       oop(addr)->print();
7762       gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)",
7763                              addr);
7764       fatal("... aborting");
7765     }
7766 
7767     if (!_mark_stack->push(obj)) { // stack overflow
7768       if (PrintCMSStatistics != 0) {
7769         gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
7770                                SIZE_FORMAT, _mark_stack->capacity());
7771       }
7772       assert(_mark_stack->isFull(), "Else push should have succeeded");
7773       handle_stack_overflow(addr);
7774     }
7775     // anything including and to the right of _finger
7776     // will be scanned as we iterate over the remainder of the
7777     // bit map
7778   }
7779 }
7780 
7781 PushOrMarkClosure::PushOrMarkClosure(CMSCollector* collector,
7782                      MemRegion span,
7783                      CMSBitMap* bitMap, CMSMarkStack*  markStack,
7784                      HeapWord* finger, MarkFromRootsClosure* parent) :
7785   CMSOopClosure(collector->ref_processor()),
7786   _collector(collector),
7787   _span(span),
7788   _bitMap(bitMap),
7789   _markStack(markStack),
7790   _finger(finger),
7791   _parent(parent)
7792 { }
7793 
7794 Par_PushOrMarkClosure::Par_PushOrMarkClosure(CMSCollector* collector,
7795                      MemRegion span,
7796                      CMSBitMap* bit_map,
7797                      OopTaskQueue* work_queue,
7798                      CMSMarkStack*  overflow_stack,
7799                      HeapWord* finger,
7800                      HeapWord** global_finger_addr,
7801                      Par_MarkFromRootsClosure* parent) :
7802   CMSOopClosure(collector->ref_processor()),
7803   _collector(collector),
7804   _whole_span(collector->_span),
7805   _span(span),
7806   _bit_map(bit_map),
7807   _work_queue(work_queue),
7808   _overflow_stack(overflow_stack),
7809   _finger(finger),
7810   _global_finger_addr(global_finger_addr),
7811   _parent(parent)
7812 { }
7813 
7814 // Assumes thread-safe access by callers, who are
7815 // responsible for mutual exclusion.
7816 void CMSCollector::lower_restart_addr(HeapWord* low) {
7817   assert(_span.contains(low), "Out of bounds addr");
7818   if (_restart_addr == NULL) {
7819     _restart_addr = low;
7820   } else {
7821     _restart_addr = MIN2(_restart_addr, low);
7822   }
7823 }
7824 
7825 // Upon stack overflow, we discard (part of) the stack,
7826 // remembering the least address amongst those discarded
7827 // in CMSCollector's _restart_addr.
7828 void PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
7829   // Remember the least grey address discarded
7830   HeapWord* ra = (HeapWord*)_markStack->least_value(lost);
7831   _collector->lower_restart_addr(ra);
7832   _markStack->reset();  // discard stack contents
7833   _markStack->expand(); // expand the stack if possible
7834 }
7835 
7836 // Upon stack overflow, we discard (part of) the stack,
7837 // remembering the least address amongst those discarded
7838 // in CMSCollector's _restart_addr.
7839 void Par_PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
7840   // We need to do this under a mutex to prevent other
7841   // workers from interfering with the work done below.
7842   MutexLockerEx ml(_overflow_stack->par_lock(),
7843                    Mutex::_no_safepoint_check_flag);
7844   // Remember the least grey address discarded
7845   HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost);
7846   _collector->lower_restart_addr(ra);
7847   _overflow_stack->reset();  // discard stack contents
7848   _overflow_stack->expand(); // expand the stack if possible
7849 }
7850 
7851 void CMKlassClosure::do_klass(Klass* k) {
7852   assert(_oop_closure != NULL, "Not initialized?");
7853   k->oops_do(_oop_closure);
7854 }
7855 
7856 void PushOrMarkClosure::do_oop(oop obj) {
7857   // Ignore mark word because we are running concurrent with mutators.
7858   assert(obj->is_oop_or_null(true), "expected an oop or NULL");
7859   HeapWord* addr = (HeapWord*)obj;
7860   if (_span.contains(addr) && !_bitMap->isMarked(addr)) {
7861     // Oop lies in _span and isn't yet grey or black
7862     _bitMap->mark(addr);            // now grey
7863     if (addr < _finger) {
7864       // the bit map iteration has already either passed, or
7865       // sampled, this bit in the bit map; we'll need to
7866       // use the marking stack to scan this oop's oops.
7867       bool simulate_overflow = false;
7868       NOT_PRODUCT(
7869         if (CMSMarkStackOverflowALot &&
7870             _collector->simulate_overflow()) {
7871           // simulate a stack overflow
7872           simulate_overflow = true;
7873         }
7874       )
7875       if (simulate_overflow || !_markStack->push(obj)) { // stack overflow
7876         if (PrintCMSStatistics != 0) {
7877           gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
7878                                  SIZE_FORMAT, _markStack->capacity());
7879         }
7880         assert(simulate_overflow || _markStack->isFull(), "Else push should have succeeded");
7881         handle_stack_overflow(addr);
7882       }
7883     }
7884     // anything including and to the right of _finger
7885     // will be scanned as we iterate over the remainder of the
7886     // bit map
7887     do_yield_check();
7888   }
7889 }
7890 
7891 void PushOrMarkClosure::do_oop(oop* p)       { PushOrMarkClosure::do_oop_work(p); }
7892 void PushOrMarkClosure::do_oop(narrowOop* p) { PushOrMarkClosure::do_oop_work(p); }
7893 
7894 void Par_PushOrMarkClosure::do_oop(oop obj) {
7895   // Ignore mark word because we are running concurrent with mutators.
7896   assert(obj->is_oop_or_null(true), "expected an oop or NULL");
7897   HeapWord* addr = (HeapWord*)obj;
7898   if (_whole_span.contains(addr) && !_bit_map->isMarked(addr)) {
7899     // Oop lies in _span and isn't yet grey or black
7900     // We read the global_finger (volatile read) strictly after marking the oop
7901     bool res = _bit_map->par_mark(addr);    // now grey
7902     volatile HeapWord** gfa = (volatile HeapWord**)_global_finger_addr;
7903     // Should we push this marked oop on our stack?
7904     // -- if someone else marked it, nothing to do
7905     // -- if target oop is above the global finger, nothing to do
7906     // -- if target oop is in this chunk and above the local finger,
7907     //      then nothing to do
7908     // -- else push on work queue
7909     if (   !res       // someone else marked it, they will deal with it
7910         || (addr >= *gfa)  // will be scanned in a later task
7911         || (_span.contains(addr) && addr >= _finger)) { // later in this chunk
7912       return;
7913     }
7914     // the bit map iteration has already either passed, or
7915     // sampled, this bit in the bit map; we'll need to
7916     // use the marking stack to scan this oop's oops.
7917     bool simulate_overflow = false;
7918     NOT_PRODUCT(
7919       if (CMSMarkStackOverflowALot &&
7920           _collector->simulate_overflow()) {
7921         // simulate a stack overflow
7922         simulate_overflow = true;
7923       }
7924     )
7925     if (simulate_overflow ||
7926         !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
7927       // stack overflow
7928       if (PrintCMSStatistics != 0) {
7929         gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
7930                                SIZE_FORMAT, _overflow_stack->capacity());
7931       }
7932       // We cannot assert that the overflow stack is full because
7933       // it may have been emptied since.
7934       assert(simulate_overflow ||
7935              _work_queue->size() == _work_queue->max_elems(),
7936             "Else push should have succeeded");
7937       handle_stack_overflow(addr);
7938     }
7939     do_yield_check();
7940   }
7941 }
7942 
7943 void Par_PushOrMarkClosure::do_oop(oop* p)       { Par_PushOrMarkClosure::do_oop_work(p); }
7944 void Par_PushOrMarkClosure::do_oop(narrowOop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
7945 
7946 PushAndMarkClosure::PushAndMarkClosure(CMSCollector* collector,
7947                                        MemRegion span,
7948                                        ReferenceProcessor* rp,
7949                                        CMSBitMap* bit_map,
7950                                        CMSBitMap* mod_union_table,
7951                                        CMSMarkStack*  mark_stack,
7952                                        bool           concurrent_precleaning):
7953   CMSOopClosure(rp),
7954   _collector(collector),
7955   _span(span),
7956   _bit_map(bit_map),
7957   _mod_union_table(mod_union_table),
7958   _mark_stack(mark_stack),
7959   _concurrent_precleaning(concurrent_precleaning)
7960 {
7961   assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
7962 }
7963 
7964 // Grey object rescan during pre-cleaning and second checkpoint phases --
7965 // the non-parallel version (the parallel version appears further below).
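     // (In the tri-color terminology used in the comments below, a "white"
     // object has not yet been marked, a "grey" object has been marked but
     // its references have not yet been scanned, and a "black" object has
     // been marked and fully scanned.)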
7966 void PushAndMarkClosure::do_oop(oop obj) {
7967   // Ignore mark word verification. If during concurrent precleaning,
7968   // the object monitor may be locked. If during the checkpoint
7969   // phases, the object may already have been reached by a different
7970   // path and may be at the end of the global overflow list (so
7971   // the mark word may be NULL).
7972   assert(obj->is_oop_or_null(true /* ignore mark word */),
7973          "expected an oop or NULL");
7974   HeapWord* addr = (HeapWord*)obj;
7975   // Check if oop points into the CMS generation
7976   // and is not marked
7977   if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
7978     // a white object ...
7979     _bit_map->mark(addr);         // ... now grey
7980     // push on the marking stack (grey set)
7981     bool simulate_overflow = false;
7982     NOT_PRODUCT(
7983       if (CMSMarkStackOverflowALot &&
7984           _collector->simulate_overflow()) {
7985         // simulate a stack overflow
7986         simulate_overflow = true;
7987       }
7988     )
7989     if (simulate_overflow || !_mark_stack->push(obj)) {
7990       if (_concurrent_precleaning) {
7991          // During precleaning we can just dirty the appropriate card(s)
7992          // in the mod union table, thus ensuring that the object remains
7993          // in the grey set and continue. In the case of object arrays
7994          // we need to dirty all of the cards that the object spans,
7995          // since the rescan of object arrays will be limited to the
7996          // dirty cards.
7997          // Note that no one can be interfering with us in this action
7998          // of dirtying the mod union table, so no locking or atomics
7999          // are required.
8000          if (obj->is_objArray()) {
8001            size_t sz = obj->size();
8002            HeapWord* end_card_addr = (HeapWord*)round_to(
8003                                         (intptr_t)(addr+sz), CardTableModRefBS::card_size);
8004            MemRegion redirty_range = MemRegion(addr, end_card_addr);
8005            assert(!redirty_range.is_empty(), "Arithmetical tautology");
8006            _mod_union_table->mark_range(redirty_range);
8007          } else {
8008            _mod_union_table->mark(addr);
8009          }
8010          _collector->_ser_pmc_preclean_ovflw++;
8011       } else {
8012          // During the remark phase, we need to remember this oop
8013          // in the overflow list.
8014          _collector->push_on_overflow_list(obj);
8015          _collector->_ser_pmc_remark_ovflw++;
8016       }
8017     }
8018   }
8019 }
8020 
8021 Par_PushAndMarkClosure::Par_PushAndMarkClosure(CMSCollector* collector,
8022                                                MemRegion span,
8023                                                ReferenceProcessor* rp,
8024                                                CMSBitMap* bit_map,
8025                                                OopTaskQueue* work_queue):
8026   CMSOopClosure(rp),
8027   _collector(collector),
8028   _span(span),
8029   _bit_map(bit_map),
8030   _work_queue(work_queue)
8031 {
8032   assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
8033 }
8034 
8035 void PushAndMarkClosure::do_oop(oop* p)       { PushAndMarkClosure::do_oop_work(p); }
8036 void PushAndMarkClosure::do_oop(narrowOop* p) { PushAndMarkClosure::do_oop_work(p); }
8037 
8038 // Grey object rescan during second checkpoint phase --
8039 // the parallel version.
8040 void Par_PushAndMarkClosure::do_oop(oop obj) {
8041   // In the assert below, we ignore the mark word because
8042   // this oop may point to an already visited object that is
8043   // on the overflow stack (in which case the mark word has
8044   // been hijacked for chaining into the overflow stack --
8045   // if this is the last object in the overflow stack then
8046   // its mark word will be NULL). Because this object may
8047   // have been subsequently popped off the global overflow
8048   // stack, and the mark word possibly restored to the prototypical
8049   // value, by the time we get to examine this failing assert in
8050   // the debugger, is_oop_or_null(false) may subsequently start
8051   // to hold.
8052   assert(obj->is_oop_or_null(true),
8053          "expected an oop or NULL");
8054   HeapWord* addr = (HeapWord*)obj;
8055   // Check if oop points into the CMS generation
8056   // and is not marked
8057   if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
8058     // a white object ...
8059     // If we manage to "claim" the object, by being the
8060     // first thread to mark it, then we push it on our
8061     // marking stack
8062     if (_bit_map->par_mark(addr)) {     // ... now grey
8063       // push on work queue (grey set)
8064       bool simulate_overflow = false;
8065       NOT_PRODUCT(
8066         if (CMSMarkStackOverflowALot &&
8067             _collector->par_simulate_overflow()) {
8068           // simulate a stack overflow
8069           simulate_overflow = true;
8070         }
8071       )
8072       if (simulate_overflow || !_work_queue->push(obj)) {
8073         _collector->par_push_on_overflow_list(obj);
8074         _collector->_par_pmc_remark_ovflw++; //  imprecise OK: no need to CAS
8075       }
8076     } // Else, some other thread got there first
8077   }
8078 }
8079 
8080 void Par_PushAndMarkClosure::do_oop(oop* p)       { Par_PushAndMarkClosure::do_oop_work(p); }
8081 void Par_PushAndMarkClosure::do_oop(narrowOop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
8082 
8083 void CMSPrecleanRefsYieldClosure::do_yield_work() {
8084   Mutex* bml = _collector->bitMapLock();
8085   assert_lock_strong(bml);
8086   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
8087          "CMS thread should hold CMS token");
8088 
8089   bml->unlock();
8090   ConcurrentMarkSweepThread::desynchronize(true);
8091 
8092   ConcurrentMarkSweepThread::acknowledge_yield_request();
8093 
8094   _collector->stopTimer();
8095   GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
8096   if (PrintCMSStatistics != 0) {
8097     _collector->incrementYields();
8098   }
8099   _collector->icms_wait();
8100 
8101   // See the comment in coordinator_yield()
8102   for (unsigned i = 0; i < CMSYieldSleepCount &&
8103                        ConcurrentMarkSweepThread::should_yield() &&
8104                        !CMSCollector::foregroundGCIsActive(); ++i) {
8105     os::sleep(Thread::current(), 1, false);
8106     ConcurrentMarkSweepThread::acknowledge_yield_request();
8107   }
8108 
8109   ConcurrentMarkSweepThread::synchronize(true);
8110   bml->lock();
8111 
8112   _collector->startTimer();
8113 }
8114 
8115 bool CMSPrecleanRefsYieldClosure::should_return() {
8116   if (ConcurrentMarkSweepThread::should_yield()) {
8117     do_yield_work();
8118   }
8119   return _collector->foregroundGCIsActive();
8120 }
8121 
8122 void MarkFromDirtyCardsClosure::do_MemRegion(MemRegion mr) {
8123   assert(((size_t)mr.start())%CardTableModRefBS::card_size_in_words == 0,
8124          "mr should be aligned to start at a card boundary");
8125   // We'd like to assert:
8126   // assert(mr.word_size()%CardTableModRefBS::card_size_in_words == 0,
8127   //        "mr should be a range of cards");
8128   // However, that would be too strong in one case -- the last
8129   // partition ends at _unallocated_block which, in general, can be
8130   // an arbitrary boundary, not necessarily card aligned.
8131   if (PrintCMSStatistics != 0) {
8132     _num_dirty_cards +=
8133          mr.word_size()/CardTableModRefBS::card_size_in_words;
8134   }
8135   _space->object_iterate_mem(mr, &_scan_cl);
8136 }
8137 
8138 SweepClosure::SweepClosure(CMSCollector* collector,
8139                            ConcurrentMarkSweepGeneration* g,
8140                            CMSBitMap* bitMap, bool should_yield) :
8141   _collector(collector),
8142   _g(g),
8143   _sp(g->cmsSpace()),
8144   _limit(_sp->sweep_limit()),
8145   _freelistLock(_sp->freelistLock()),
8146   _bitMap(bitMap),
8147   _yield(should_yield),
8148   _inFreeRange(false),           // No free range at beginning of sweep
8149   _freeRangeInFreeLists(false),  // No free range at beginning of sweep
8150   _lastFreeRangeCoalesced(false),
8151   _freeFinger(g->used_region().start())
8152 {
8153   NOT_PRODUCT(
8154     _numObjectsFreed = 0;
8155     _numWordsFreed   = 0;
8156     _numObjectsLive = 0;
8157     _numWordsLive = 0;
8158     _numObjectsAlreadyFree = 0;
8159     _numWordsAlreadyFree = 0;
8160     _last_fc = NULL;
8161 
8162     _sp->initializeIndexedFreeListArrayReturnedBytes();
8163     _sp->dictionary()->initialize_dict_returned_bytes();
8164   )
8165   assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
8166          "sweep _limit out of bounds");
8167   if (CMSTraceSweeper) {
8168     gclog_or_tty->print_cr("\n====================\nStarting new sweep with limit " PTR_FORMAT,
8169                         _limit);
8170   }
8171 }
8172 
8173 void SweepClosure::print_on(outputStream* st) const {
8174   st->print_cr("_sp = [" PTR_FORMAT "," PTR_FORMAT ")",
8175                _sp->bottom(), _sp->end());
8176   st->print_cr("_limit = " PTR_FORMAT, _limit);
8177   st->print_cr("_freeFinger = " PTR_FORMAT, _freeFinger);
8178   NOT_PRODUCT(st->print_cr("_last_fc = " PTR_FORMAT, _last_fc);)
8179   st->print_cr("_inFreeRange = %d, _freeRangeInFreeLists = %d, _lastFreeRangeCoalesced = %d",
8180                 _inFreeRange, _freeRangeInFreeLists, _lastFreeRangeCoalesced);
8181 }
8182 
8183 #ifndef PRODUCT
8184 // Assertion checking only:  no useful work in product mode --
8185 // however, if any of the flags below become product flags,
8186 // you may need to review this code to see if it needs to be
8187 // enabled in product mode.
8188 SweepClosure::~SweepClosure() {
8189   assert_lock_strong(_freelistLock);
8190   assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
8191          "sweep _limit out of bounds");
8192   if (inFreeRange()) {
8193     warning("inFreeRange() should have been reset; dumping state of SweepClosure");
8194     print();
8195     ShouldNotReachHere();
8196   }
8197   if (Verbose && PrintGC) {
8198     gclog_or_tty->print("Collected "SIZE_FORMAT" objects, " SIZE_FORMAT " bytes",
8199                         _numObjectsFreed, _numWordsFreed*sizeof(HeapWord));
8200     gclog_or_tty->print_cr("\nLive "SIZE_FORMAT" objects,  "
8201                            SIZE_FORMAT" bytes  "
8202       "Already free "SIZE_FORMAT" objects, "SIZE_FORMAT" bytes",
8203       _numObjectsLive, _numWordsLive*sizeof(HeapWord),
8204       _numObjectsAlreadyFree, _numWordsAlreadyFree*sizeof(HeapWord));
8205     size_t totalBytes = (_numWordsFreed + _numWordsLive + _numWordsAlreadyFree)
8206                         * sizeof(HeapWord);
8207     gclog_or_tty->print_cr("Total sweep: "SIZE_FORMAT" bytes", totalBytes);
8208 
8209     if (PrintCMSStatistics && CMSVerifyReturnedBytes) {
8210       size_t indexListReturnedBytes = _sp->sumIndexedFreeListArrayReturnedBytes();
8211       size_t dict_returned_bytes = _sp->dictionary()->sum_dict_returned_bytes();
8212       size_t returned_bytes = indexListReturnedBytes + dict_returned_bytes;
8213       gclog_or_tty->print("Returned "SIZE_FORMAT" bytes", returned_bytes);
8214       gclog_or_tty->print("   Indexed List Returned "SIZE_FORMAT" bytes",
8215         indexListReturnedBytes);
8216       gclog_or_tty->print_cr("        Dictionary Returned "SIZE_FORMAT" bytes",
8217         dict_returned_bytes);
8218     }
8219   }
8220   if (CMSTraceSweeper) {
8221     gclog_or_tty->print_cr("end of sweep with _limit = " PTR_FORMAT "\n================",
8222                            _limit);
8223   }
8224 }
8225 #endif  // PRODUCT
8226 
8227 void SweepClosure::initialize_free_range(HeapWord* freeFinger,
8228     bool freeRangeInFreeLists) {
8229   if (CMSTraceSweeper) {
8230     gclog_or_tty->print("---- Start free range at " PTR_FORMAT " with free block (%d)\n",
8231                freeFinger, freeRangeInFreeLists);
8232   }
8233   assert(!inFreeRange(), "Trampling existing free range");
8234   set_inFreeRange(true);
8235   set_lastFreeRangeCoalesced(false);
8236 
8237   set_freeFinger(freeFinger);
8238   set_freeRangeInFreeLists(freeRangeInFreeLists);
8239   if (CMSTestInFreeList) {
8240     if (freeRangeInFreeLists) {
8241       FreeChunk* fc = (FreeChunk*) freeFinger;
8242       assert(fc->is_free(), "A chunk on the free list should be free.");
8243       assert(fc->size() > 0, "Free range should have a size");
8244       assert(_sp->verify_chunk_in_free_list(fc), "Chunk is not in free lists");
8245     }
8246   }
8247 }
8248 
8249 // Note that the sweeper runs concurrently with mutators. Thus,
8250 // it is possible for direct allocation in this generation to happen
8251 // in the middle of the sweep. Note that the sweeper also coalesces
8252 // contiguous free blocks. Thus, unless the sweeper and the allocator
8253 // synchronize appropriately, freshly allocated blocks may get swept up.
8254 // This is accomplished by the sweeper locking the free lists while
8255 // it is sweeping. Thus blocks that are determined to be free are
8256 // indeed free. There is however one additional complication:
8257 // blocks that have been allocated since the final checkpoint and
8258 // mark, will not have been marked and so would be treated as
8259 // unreachable and swept up. To prevent this, the allocator marks
8260 // the bit map when allocating during the sweep phase. This leads,
8261 // however, to a further complication -- objects may have been allocated
8262 // but not yet initialized -- in the sense that the header isn't yet
8263 // installed. The sweeper cannot then determine the size of the block
8264 // in order to skip over it. To deal with this case, we use a technique
8265 // (due to Printezis) to encode such uninitialized block sizes in the
8266 // bit map. Since the bit map uses a bit per every HeapWord, but the
8267 // CMS generation has a minimum object size of 3 HeapWords, it follows
8268 // that "normal marks" won't be adjacent in the bit map (there will
8269 // always be at least two 0 bits between successive 1 bits). We make use
8270 // of these "unused" bits to represent uninitialized blocks -- the bit
8271 // corresponding to the start of the uninitialized object and the next
8272 // bit are both set. Finally, a 1 bit marks the end of the object that
8273 // started with the two consecutive 1 bits to indicate its potentially
8274 // uninitialized state.
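     // For example (illustrative): an uninitialized block starting at addr
     // and spanning 5 words has bits set at addr, addr+1 and addr+4, so
     // SweepClosure::do_live_chunk() below can recover its size from the
     // bit map alone (pointer_delta(addr+4 + 1, addr) == 5) without needing
     // the block's possibly not-yet-installed header.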
8275 
8276 size_t SweepClosure::do_blk_careful(HeapWord* addr) {
8277   FreeChunk* fc = (FreeChunk*)addr;
8278   size_t res;
8279 
8280   // Check if we are done sweeping. Below we check "addr >= _limit" rather
8281   // than "addr == _limit" because although _limit was a block boundary when
8282   // we started the sweep, it may no longer be one because heap expansion
8283   // may have caused us to coalesce the block ending at the address _limit
8284   // with a newly expanded chunk (this happens when _limit was set to the
8285   // previous _end of the space), so we may have stepped past _limit:
8286   // see the following Zeno-like trail of CRs 6977970, 7008136, 7042740.
8287   if (addr >= _limit) { // we have swept up to or past the limit: finish up
8288     assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
8289            "sweep _limit out of bounds");
8290     assert(addr < _sp->end(), "addr out of bounds");
8291     // Flush any free range we might be holding as a single
8292     // coalesced chunk to the appropriate free list.
8293     if (inFreeRange()) {
8294       assert(freeFinger() >= _sp->bottom() && freeFinger() < _limit,
8295              err_msg("freeFinger() " PTR_FORMAT" is out-of-bounds", freeFinger()));
8296       flush_cur_free_chunk(freeFinger(),
8297                            pointer_delta(addr, freeFinger()));
8298       if (CMSTraceSweeper) {
8299         gclog_or_tty->print("Sweep: last chunk: ");
8300         gclog_or_tty->print("put_free_blk " PTR_FORMAT " (" SIZE_FORMAT ") "
8301                    "[coalesced:%d]\n",
8302                    freeFinger(), pointer_delta(addr, freeFinger()),
8303                    lastFreeRangeCoalesced());
8304       }
8305     }
8306 
8307     // help the iterator loop finish
8308     return pointer_delta(_sp->end(), addr);
8309   }
8310 
8311   assert(addr < _limit, "sweep invariant");
8312   // check if we should yield
8313   do_yield_check(addr);
8314   if (fc->is_free()) {
8315     // Chunk that is already free
8316     res = fc->size();
8317     do_already_free_chunk(fc);
8318     debug_only(_sp->verifyFreeLists());
8319     // If we flush the chunk at hand in lookahead_and_flush()
8320     // and it's coalesced with a preceding chunk, then the
8321     // process of "mangling" the payload of the coalesced block
8322     // will cause erasure of the size information from the
8323     // (erstwhile) header of all the coalesced blocks but the
8324     // first, so the first disjunct in the assert will not hold
8325     // in that specific case (in which case the second disjunct
8326     // will hold).
8327     assert(res == fc->size() || ((HeapWord*)fc) + res >= _limit,
8328            "Otherwise the size info doesn't change at this step");
8329     NOT_PRODUCT(
8330       _numObjectsAlreadyFree++;
8331       _numWordsAlreadyFree += res;
8332     )
8333     NOT_PRODUCT(_last_fc = fc;)
8334   } else if (!_bitMap->isMarked(addr)) {
8335     // Chunk is fresh garbage
8336     res = do_garbage_chunk(fc);
8337     debug_only(_sp->verifyFreeLists());
8338     NOT_PRODUCT(
8339       _numObjectsFreed++;
8340       _numWordsFreed += res;
8341     )
8342   } else {
8343     // Chunk that is alive.
8344     res = do_live_chunk(fc);
8345     debug_only(_sp->verifyFreeLists());
8346     NOT_PRODUCT(
8347         _numObjectsLive++;
8348         _numWordsLive += res;
8349     )
8350   }
8351   return res;
8352 }
8353 
8354 // For the smart allocation, record following
8355 //  split deaths - a free chunk is removed from its free list because
8356 //      it is being split into two or more chunks.
8357 //  split birth - a free chunk is being added to its free list because
8358 //      a larger free chunk has been split and resulted in this free chunk.
8359 //  coal death - a free chunk is being removed from its free list because
8360 //      it is being coalesced into a large free chunk.
8361 //  coal birth - a free chunk is being added to its free list because
8362 //      it was created when two or more free chunks were coalesced into
8363 //      this free chunk.
8364 //
8365 // These statistics are used to determine the desired number of free
8366 // chunks of a given size.  The desired number is chosen to be relative
8367 // to the end of a CMS sweep.  The desired number at the end of a sweep
8368 // is the
8369 //      count-at-end-of-previous-sweep (an amount that was enough)
8370 //              - count-at-beginning-of-current-sweep  (the excess)
8371 //              + split-births  (gains in this size during interval)
8372 //              - split-deaths  (demands on this size during interval)
8373 // where the interval is from the end of one sweep to the end of the
8374 // next.
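     // For instance (numbers purely illustrative): if a given size had 100
     // free chunks at the end of the previous sweep, 40 remain at the
     // beginning of this sweep, and the interval saw 20 split-births and
     // 10 split-deaths, then the desired count at the end of this sweep is
     //      100 - 40 + 20 - 10 = 70.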
8375 //
8376 // When sweeping, the sweeper maintains an accumulated chunk which is
8377 // the chunk that is made up of chunks that have been coalesced.  That
8378 // will be termed the left-hand chunk.  A new chunk of garbage that
8379 // is being considered for coalescing will be referred to as the
8380 // right-hand chunk.
8381 //
8382 // When making a decision on whether to coalesce a right-hand chunk with
8383 // the current left-hand chunk, the current count vs. the desired count
8384 // of the left-hand chunk is considered.  Also if the right-hand chunk
8385 // is near the large chunk at the end of the heap (see
8386 // ConcurrentMarkSweepGeneration::isNearLargestChunk()), then the
8387 // left-hand chunk is coalesced.
8388 //
8389 // When making a decision about whether to split a chunk, the desired count
8390 // vs. the current count of the candidate to be split is also considered.
8391 // If the candidate is underpopulated (currently fewer chunks than desired)
8392 // a chunk of an overpopulated (currently more chunks than desired) size may
8393 // be chosen.  The "hint" associated with a free list, if non-null, points
8394 // to a free list which may be overpopulated.
8395 //
8396 
8397 void SweepClosure::do_already_free_chunk(FreeChunk* fc) {
8398   const size_t size = fc->size();
8399   // Chunks that cannot be coalesced are not in the
8400   // free lists.
8401   if (CMSTestInFreeList && !fc->cantCoalesce()) {
8402     assert(_sp->verify_chunk_in_free_list(fc),
8403       "free chunk should be in free lists");
8404   }
8405   // A chunk that is already free should not have been
8406   // marked in the bit map.
8407   HeapWord* const addr = (HeapWord*) fc;
8408   assert(!_bitMap->isMarked(addr), "free chunk should be unmarked");
8409   // Verify that the bit map has no bits marked between
8410   // addr and purported end of this block.
8411   _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
8412 
8413   // Some chunks cannot be coalesced under any circumstances.
8414   // See the definition of cantCoalesce().
8415   if (!fc->cantCoalesce()) {
8416     // This chunk can potentially be coalesced.
8417     if (_sp->adaptive_freelists()) {
8418       // All the work is done in
8419       do_post_free_or_garbage_chunk(fc, size);
8420     } else {  // Not adaptive free lists
8421       // this is a free chunk that can potentially be coalesced by the sweeper;
8422       if (!inFreeRange()) {
8423         // if the next chunk is a free block that can't be coalesced,
8424         // it doesn't make sense to remove this chunk from the free lists
8425         FreeChunk* nextChunk = (FreeChunk*)(addr + size);
8426         assert((HeapWord*)nextChunk <= _sp->end(), "Chunk size out of bounds?");
8427         if ((HeapWord*)nextChunk < _sp->end() &&     // There is another free chunk to the right ...
8428             nextChunk->is_free()               &&     // ... which is free...
8429             nextChunk->cantCoalesce()) {             // ... but can't be coalesced
8430           // nothing to do
8431         } else {
8432           // Potentially the start of a new free range:
8433           // Don't eagerly remove it from the free lists.
8434           // No need to remove it if it will just be put
8435           // back again.  (Also from a pragmatic point of view
8436           // if it is a free block in a region that is beyond
8437           // any allocated blocks, an assertion will fail)
8438           // Remember the start of a free run.
8439           initialize_free_range(addr, true);
8440           // end - can coalesce with next chunk
8441         }
8442       } else {
8443         // In the midst of a free range: we are coalescing
8444         print_free_block_coalesced(fc);
8445         if (CMSTraceSweeper) {
8446           gclog_or_tty->print("  -- pick up free block " PTR_FORMAT " (" SIZE_FORMAT ")\n", fc, size);
8447         }
8448         // remove it from the free lists
8449         _sp->removeFreeChunkFromFreeLists(fc);
8450         set_lastFreeRangeCoalesced(true);
8451         // If the chunk is being coalesced and the current free range is
8452         // in the free lists, remove the current free range so that it
8453         // will be returned to the free lists in its entirety - all
8454         // the coalesced pieces included.
8455         if (freeRangeInFreeLists()) {
8456           FreeChunk* ffc = (FreeChunk*) freeFinger();
8457           assert(ffc->size() == pointer_delta(addr, freeFinger()),
8458             "Size of free range is inconsistent with chunk size.");
8459           if (CMSTestInFreeList) {
8460             assert(_sp->verify_chunk_in_free_list(ffc),
8461               "free range is not in free lists");
8462           }
8463           _sp->removeFreeChunkFromFreeLists(ffc);
8464           set_freeRangeInFreeLists(false);
8465         }
8466       }
8467     }
8468     // Note that if the chunk is not coalescable (the else arm
8469     // below), we unconditionally flush, without needing to do
8470     // a "lookahead," as we do below.
8471     if (inFreeRange()) lookahead_and_flush(fc, size);
8472   } else {
8473     // Code path common to both original and adaptive free lists.
8474 
8475     // can't coalesce with the previous block; this should be treated
8476     // as the end of a free run, if any
8477     if (inFreeRange()) {
8478       // we kicked some butt; time to pick up the garbage
8479       assert(freeFinger() < addr, "freeFinger points too high");
8480       flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
8481     }
8482     // else, nothing to do, just continue
8483   }
8484 }
8485 
8486 size_t SweepClosure::do_garbage_chunk(FreeChunk* fc) {
8487   // This is a chunk of garbage.  It is not in any free list.
8488   // Add it to a free list or let it possibly be coalesced into
8489   // a larger chunk.
8490   HeapWord* const addr = (HeapWord*) fc;
8491   const size_t size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
8492 
8493   if (_sp->adaptive_freelists()) {
8494     // Verify that the bit map has no bits marked between
8495     // addr and purported end of just dead object.
8496     _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
8497 
8498     do_post_free_or_garbage_chunk(fc, size);
8499   } else {
8500     if (!inFreeRange()) {
8501       // start of a new free range
8502       assert(size > 0, "A free range should have a size");
8503       initialize_free_range(addr, false);
8504     } else {
8505       // this will be swept up when we hit the end of the
8506       // free range
8507       if (CMSTraceSweeper) {
8508         gclog_or_tty->print("  -- pick up garbage " PTR_FORMAT " (" SIZE_FORMAT ")\n", fc, size);
8509       }
8510       // If the chunk is being coalesced and the current free range is
8511       // in the free lists, remove the current free range so that it
8512       // will be returned to the free lists in its entirety - all
8513       // the coalesced pieces included.
8514       if (freeRangeInFreeLists()) {
8515         FreeChunk* ffc = (FreeChunk*)freeFinger();
8516         assert(ffc->size() == pointer_delta(addr, freeFinger()),
8517           "Size of free range is inconsistent with chunk size.");
8518         if (CMSTestInFreeList) {
8519           assert(_sp->verify_chunk_in_free_list(ffc),
8520             "free range is not in free lists");
8521         }
8522         _sp->removeFreeChunkFromFreeLists(ffc);
8523         set_freeRangeInFreeLists(false);
8524       }
8525       set_lastFreeRangeCoalesced(true);
8526     }
8527     // this will be swept up when we hit the end of the free range
8528 
8529     // Verify that the bit map has no bits marked between
8530     // addr and purported end of just dead object.
8531     _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
8532   }
8533   assert(_limit >= addr + size,
8534          "A freshly garbage chunk can't possibly straddle over _limit");
8535   if (inFreeRange()) lookahead_and_flush(fc, size);
8536   return size;
8537 }
8538 
8539 size_t SweepClosure::do_live_chunk(FreeChunk* fc) {
8540   HeapWord* addr = (HeapWord*) fc;
8541   // The sweeper has just found a live object. Return any accumulated
8542   // left hand chunk to the free lists.
8543   if (inFreeRange()) {
8544     assert(freeFinger() < addr, "freeFinger points too high");
8545     flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
8546   }
8547 
8548   // This object is live: we'd normally expect this to be
8549   // an oop, and would like to assert the following:
8550   // assert(oop(addr)->is_oop(), "live block should be an oop");
8551   // However, as we commented above, this may be an object whose
8552   // header hasn't yet been initialized.
8553   size_t size;
8554   assert(_bitMap->isMarked(addr), "Tautology for this control point");
8555   if (_bitMap->isMarked(addr + 1)) {
8556     // Determine the size from the bit map, rather than trying to
8557     // compute it from the object header.
8558     HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
8559     size = pointer_delta(nextOneAddr + 1, addr);
8560     assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
8561            "alignment problem");
8562 
8563 #ifdef ASSERT
8564       if (oop(addr)->klass_or_null() != NULL) {
8565         // Ignore mark word because we are running concurrent with mutators
8566         assert(oop(addr)->is_oop(true), "live block should be an oop");
8567         assert(size ==
8568                CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size()),
8569                "P-mark and computed size do not agree");
8570       }
8571 #endif
8572 
8573   } else {
8574     // This should be an initialized object that's alive.
8575     assert(oop(addr)->klass_or_null() != NULL,
8576            "Should be an initialized object");
8577     // Ignore mark word because we are running concurrent with mutators
8578     assert(oop(addr)->is_oop(true), "live block should be an oop");
8579     // Verify that the bit map has no bits marked between
8580     // addr and purported end of this block.
8581     size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
8582     assert(size >= 3, "Necessary for Printezis marks to work");
8583     assert(!_bitMap->isMarked(addr+1), "Tautology for this control point");
8584     DEBUG_ONLY(_bitMap->verifyNoOneBitsInRange(addr+2, addr+size);)
8585   }
8586   return size;
8587 }
8588 
8589 void SweepClosure::do_post_free_or_garbage_chunk(FreeChunk* fc,
8590                                                  size_t chunkSize) {
8591   // do_post_free_or_garbage_chunk() should only be called in the case
8592   // of the adaptive free list allocator.
8593   const bool fcInFreeLists = fc->is_free();
8594   assert(_sp->adaptive_freelists(), "Should only be used in this case.");
8595   assert((HeapWord*)fc <= _limit, "sweep invariant");
8596   if (CMSTestInFreeList && fcInFreeLists) {
8597     assert(_sp->verify_chunk_in_free_list(fc), "free chunk is not in free lists");
8598   }
8599 
8600   if (CMSTraceSweeper) {
8601     gclog_or_tty->print_cr("  -- pick up another chunk at " PTR_FORMAT " (" SIZE_FORMAT ")", fc, chunkSize);
8602   }
8603 
8604   HeapWord* const fc_addr = (HeapWord*) fc;
8605 
8606   bool coalesce;
8607   const size_t left  = pointer_delta(fc_addr, freeFinger());
8608   const size_t right = chunkSize;
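       // "left" is the size of the free range accumulated so far (the
       // left-hand chunk in the commentary above); "right" is the size of
       // the chunk currently being considered for coalescing (the
       // right-hand chunk).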
8609   switch (FLSCoalescePolicy) {
8610     // the numeric value forms a coalescing-aggressiveness metric
8611     case 0:  { // never coalesce
8612       coalesce = false;
8613       break;
8614     }
8615     case 1: { // coalesce if left & right chunks on overpopulated lists
8616       coalesce = _sp->coalOverPopulated(left) &&
8617                  _sp->coalOverPopulated(right);
8618       break;
8619     }
8620     case 2: { // coalesce if left chunk on overpopulated list (default)
8621       coalesce = _sp->coalOverPopulated(left);
8622       break;
8623     }
8624     case 3: { // coalesce if left OR right chunk on overpopulated list
8625       coalesce = _sp->coalOverPopulated(left) ||
8626                  _sp->coalOverPopulated(right);
8627       break;
8628     }
8629     case 4: { // always coalesce
8630       coalesce = true;
8631       break;
8632     }
8633     default:
8634      ShouldNotReachHere();
8635   }
8636 
8637   // Should the current free range be coalesced?
8638   // If the chunk is in a free range and either we decided to coalesce above
8639   // or the chunk is near the large block at the end of the heap
8640   // (isNearLargestChunk() returns true), then coalesce this chunk.
8641   const bool doCoalesce = inFreeRange()
8642                           && (coalesce || _g->isNearLargestChunk(fc_addr));
8643   if (doCoalesce) {
8644     // Coalesce the current free range on the left with the new
8645     // chunk on the right.  If either is on a free list,
8646     // it must be removed from the list and stashed in the closure.
8647     if (freeRangeInFreeLists()) {
8648       FreeChunk* const ffc = (FreeChunk*)freeFinger();
8649       assert(ffc->size() == pointer_delta(fc_addr, freeFinger()),
8650         "Size of free range is inconsistent with chunk size.");
8651       if (CMSTestInFreeList) {
8652         assert(_sp->verify_chunk_in_free_list(ffc),
8653           "Chunk is not in free lists");
8654       }
8655       _sp->coalDeath(ffc->size());
8656       _sp->removeFreeChunkFromFreeLists(ffc);
8657       set_freeRangeInFreeLists(false);
8658     }
8659     if (fcInFreeLists) {
8660       _sp->coalDeath(chunkSize);
8661       assert(fc->size() == chunkSize,
8662         "The chunk has the wrong size or is not in the free lists");
8663       _sp->removeFreeChunkFromFreeLists(fc);
8664     }
8665     set_lastFreeRangeCoalesced(true);
8666     print_free_block_coalesced(fc);
8667   } else {  // not in a free range and/or should not coalesce
8668     // Return the current free range and start a new one.
8669     if (inFreeRange()) {
8670       // In a free range but cannot coalesce with the right hand chunk.
8671       // Put the current free range into the free lists.
8672       flush_cur_free_chunk(freeFinger(),
8673                            pointer_delta(fc_addr, freeFinger()));
8674     }
8675     // Set up for new free range.  Pass along whether the right hand
8676     // chunk is in the free lists.
8677     initialize_free_range((HeapWord*)fc, fcInFreeLists);
8678   }
8679 }
8680 
8681 // Lookahead flush:
8682 // If we are tracking a free range, and this is the last chunk that
8683 // we'll look at because its end crosses past _limit, we'll preemptively
8684 // flush it along with any free range we may be holding on to. Note that
8685 // this can be the case only for an already free or freshly garbage
8686 // chunk. If this block is an object, it can never straddle
8687 // over _limit. The "straddling" occurs when _limit is set at
8688 // the previous end of the space when this cycle started, and
8689 // a subsequent heap expansion caused the previously co-terminal
8690 // free block to be coalesced with the newly expanded portion,
8691 // thus rendering _limit a non-block-boundary making it dangerous
8692 // for the sweeper to step over and examine.
8693 void SweepClosure::lookahead_and_flush(FreeChunk* fc, size_t chunk_size) {
8694   assert(inFreeRange(), "Should only be called if currently in a free range.");
8695   HeapWord* const eob = ((HeapWord*)fc) + chunk_size;
8696   assert(_sp->used_region().contains(eob - 1),
8697          err_msg("eob = " PTR_FORMAT " out of bounds wrt _sp = [" PTR_FORMAT "," PTR_FORMAT ")"
8698                  " when examining fc = " PTR_FORMAT "(" SIZE_FORMAT ")",
8699                  eob, _sp->bottom(), _sp->end(), fc, chunk_size));
8700   if (eob >= _limit) {
8701     assert(eob == _limit || fc->is_free(), "Only a free chunk should allow us to cross over the limit");
8702     if (CMSTraceSweeper) {
8703       gclog_or_tty->print_cr("_limit " PTR_FORMAT " reached or crossed by block "
8704                              "[" PTR_FORMAT "," PTR_FORMAT ") in space "
8705                              "[" PTR_FORMAT "," PTR_FORMAT ")",
8706                              _limit, fc, eob, _sp->bottom(), _sp->end());
8707     }
8708     // Return the storage we are tracking back into the free lists.
8709     if (CMSTraceSweeper) {
8710       gclog_or_tty->print_cr("Flushing ... ");
8711     }
8712     assert(freeFinger() < eob, "Error");
8713     flush_cur_free_chunk( freeFinger(), pointer_delta(eob, freeFinger()));
8714   }
8715 }
8716 
8717 void SweepClosure::flush_cur_free_chunk(HeapWord* chunk, size_t size) {
8718   assert(inFreeRange(), "Should only be called if currently in a free range.");
8719   assert(size > 0,
8720     "A zero sized chunk cannot be added to the free lists.");
8721   if (!freeRangeInFreeLists()) {
8722     if (CMSTestInFreeList) {
8723       FreeChunk* fc = (FreeChunk*) chunk;
8724       fc->set_size(size);
8725       assert(!_sp->verify_chunk_in_free_list(fc),
8726         "chunk should not be in free lists yet");
8727     }
8728     if (CMSTraceSweeper) {
8729       gclog_or_tty->print_cr(" -- add free block " PTR_FORMAT " (" SIZE_FORMAT ") to free lists",
8730                     chunk, size);
8731     }
8732     // A new free range is going to be starting.  The current
8733     // free range has not been added to the free lists yet or
8734     // was removed so add it back.
8735     // If the current free range was coalesced, then the death
8736     // of the free range was recorded.  Record a birth now.
8737     if (lastFreeRangeCoalesced()) {
8738       _sp->coalBirth(size);
8739     }
8740     _sp->addChunkAndRepairOffsetTable(chunk, size,
8741             lastFreeRangeCoalesced());
8742   } else if (CMSTraceSweeper) {
8743     gclog_or_tty->print_cr("Already in free list: nothing to flush");
8744   }
8745   set_inFreeRange(false);
8746   set_freeRangeInFreeLists(false);
8747 }
8748 
8749 // We take a break if we've been at this for a while,
8750 // so as to avoid monopolizing the locks involved.
8751 void SweepClosure::do_yield_work(HeapWord* addr) {
8752   // Return current free chunk being used for coalescing (if any)
8753   // to the appropriate freelist.  After yielding, the next
8754   // free block encountered will start a coalescing range of
8755   // free blocks.  If the next free block is adjacent to the
8756   // chunk just flushed, they will need to wait for the next
8757   // sweep to be coalesced.
8758   if (inFreeRange()) {
8759     flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
8760   }
8761 
8762   // First give up the locks, then yield, then re-lock.
8763   // We should probably use a constructor/destructor idiom to
8764   // do this unlock/lock or modify the MutexUnlocker class to
8765   // serve our purpose. XXX
8766   assert_lock_strong(_bitMap->lock());
8767   assert_lock_strong(_freelistLock);
8768   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
8769          "CMS thread should hold CMS token");
8770   _bitMap->lock()->unlock();
8771   _freelistLock->unlock();
8772   ConcurrentMarkSweepThread::desynchronize(true);
8773   ConcurrentMarkSweepThread::acknowledge_yield_request();
8774   _collector->stopTimer();
8775   GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
8776   if (PrintCMSStatistics != 0) {
8777     _collector->incrementYields();
8778   }
8779   _collector->icms_wait();
8780 
8781   // See the comment in coordinator_yield()
8782   for (unsigned i = 0; i < CMSYieldSleepCount &&
8783                        ConcurrentMarkSweepThread::should_yield() &&
8784                        !CMSCollector::foregroundGCIsActive(); ++i) {
8785     os::sleep(Thread::current(), 1, false);
8786     ConcurrentMarkSweepThread::acknowledge_yield_request();
8787   }
8788 
8789   ConcurrentMarkSweepThread::synchronize(true);
8790   _freelistLock->lock();
8791   _bitMap->lock()->lock_without_safepoint_check();
8792   _collector->startTimer();
8793 }
8794 
8795 #ifndef PRODUCT
8796 // This is actually very useful in a product build if it can
8797 // be called from the debugger.  Compile it into the product
8798 // as needed.
8799 bool debug_verify_chunk_in_free_list(FreeChunk* fc) {
8800   return debug_cms_space->verify_chunk_in_free_list(fc);
8801 }
8802 #endif
8803 
8804 void SweepClosure::print_free_block_coalesced(FreeChunk* fc) const {
8805   if (CMSTraceSweeper) {
8806     gclog_or_tty->print_cr("Sweep:coal_free_blk " PTR_FORMAT " (" SIZE_FORMAT ")",
8807                            fc, fc->size());
8808   }
8809 }
8810 
8811 // CMSIsAliveClosure
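     // An object is considered live if it lies outside the span being
     // collected (and is thus not subject to this collection) or if it has
     // already been marked in the bit map.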
8812 bool CMSIsAliveClosure::do_object_b(oop obj) {
8813   HeapWord* addr = (HeapWord*)obj;
8814   return addr != NULL &&
8815          (!_span.contains(addr) || _bit_map->isMarked(addr));
8816 }
8817 
8818 
8819 CMSKeepAliveClosure::CMSKeepAliveClosure( CMSCollector* collector,
8820                       MemRegion span,
8821                       CMSBitMap* bit_map, CMSMarkStack* mark_stack,
8822                       bool cpc):
8823   _collector(collector),
8824   _span(span),
8825   _bit_map(bit_map),
8826   _mark_stack(mark_stack),
8827   _concurrent_precleaning(cpc) {
8828   assert(!_span.is_empty(), "Empty span could spell trouble");
8829 }
8830 
8831 
8832 // CMSKeepAliveClosure: the serial version
8833 void CMSKeepAliveClosure::do_oop(oop obj) {
8834   HeapWord* addr = (HeapWord*)obj;
8835   if (_span.contains(addr) &&
8836       !_bit_map->isMarked(addr)) {
8837     _bit_map->mark(addr);
8838     bool simulate_overflow = false;
8839     NOT_PRODUCT(
8840       if (CMSMarkStackOverflowALot &&
8841           _collector->simulate_overflow()) {
8842         // simulate a stack overflow
8843         simulate_overflow = true;
8844       }
8845     )
8846     if (simulate_overflow || !_mark_stack->push(obj)) {
8847       if (_concurrent_precleaning) {
8848         // We dirty the overflown object and let the remark
8849         // phase deal with it.
8850         assert(_collector->overflow_list_is_empty(), "Error");
8851         // In the case of object arrays, we need to dirty all of
8852         // the cards that the object spans. No locking or atomics
8853         // are needed since no one else can be mutating the mod union
8854         // table.
8855         if (obj->is_objArray()) {
8856           size_t sz = obj->size();
8857           HeapWord* end_card_addr =
8858             (HeapWord*)round_to((intptr_t)(addr+sz), CardTableModRefBS::card_size);
8859           MemRegion redirty_range = MemRegion(addr, end_card_addr);
8860           assert(!redirty_range.is_empty(), "Arithmetical tautology");
8861           _collector->_modUnionTable.mark_range(redirty_range);
8862         } else {
8863           _collector->_modUnionTable.mark(addr);
8864         }
8865         _collector->_ser_kac_preclean_ovflw++;
8866       } else {
8867         _collector->push_on_overflow_list(obj);
8868         _collector->_ser_kac_ovflw++;
8869       }
8870     }
8871   }
8872 }
8873 
8874 void CMSKeepAliveClosure::do_oop(oop* p)       { CMSKeepAliveClosure::do_oop_work(p); }
8875 void CMSKeepAliveClosure::do_oop(narrowOop* p) { CMSKeepAliveClosure::do_oop_work(p); }
8876 
8877 // CMSParKeepAliveClosure: a parallel version of the above.
8878 // The work queues are private to each closure (thread),
8879 // but (may be) available for stealing by other threads.
8880 void CMSParKeepAliveClosure::do_oop(oop obj) {
8881   HeapWord* addr = (HeapWord*)obj;
8882   if (_span.contains(addr) &&
8883       !_bit_map->isMarked(addr)) {
8884     // In general, during recursive tracing, several threads
8885     // may be concurrently getting here; the first one to
8886     // "tag" it claims it.
8887     if (_bit_map->par_mark(addr)) {
8888       bool res = _work_queue->push(obj);
8889       assert(res, "Low water mark should be much less than capacity");
8890       // Do a recursive trim in the hope that this will keep
8891       // stack usage lower, but leave some oops for potential stealers
8892       trim_queue(_low_water_mark);
8893     } // Else, another thread got there first
8894   }
8895 }
8896 
8897 void CMSParKeepAliveClosure::do_oop(oop* p)       { CMSParKeepAliveClosure::do_oop_work(p); }
8898 void CMSParKeepAliveClosure::do_oop(narrowOop* p) { CMSParKeepAliveClosure::do_oop_work(p); }
8899 
8900 void CMSParKeepAliveClosure::trim_queue(uint max) {
8901   while (_work_queue->size() > max) {
8902     oop new_oop;
8903     if (_work_queue->pop_local(new_oop)) {
8904       assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
8905       assert(_bit_map->isMarked((HeapWord*)new_oop),
8906              "no white objects on this stack!");
8907       assert(_span.contains((HeapWord*)new_oop), "Out of bounds oop");
8908       // iterate over the oops in this oop, marking and pushing
8909       // the ones in CMS heap (i.e. in _span).
8910       new_oop->oop_iterate(&_mark_and_push);
8911     }
8912   }
8913 }
8914 
8915 CMSInnerParMarkAndPushClosure::CMSInnerParMarkAndPushClosure(
8916                                 CMSCollector* collector,
8917                                 MemRegion span, CMSBitMap* bit_map,
8918                                 OopTaskQueue* work_queue):
8919   _collector(collector),
8920   _span(span),
8921   _bit_map(bit_map),
8922   _work_queue(work_queue) { }
8923 
8924 void CMSInnerParMarkAndPushClosure::do_oop(oop obj) {
8925   HeapWord* addr = (HeapWord*)obj;
8926   if (_span.contains(addr) &&
8927       !_bit_map->isMarked(addr)) {
8928     if (_bit_map->par_mark(addr)) {
8929       bool simulate_overflow = false;
8930       NOT_PRODUCT(
8931         if (CMSMarkStackOverflowALot &&
8932             _collector->par_simulate_overflow()) {
8933           // simulate a stack overflow
8934           simulate_overflow = true;
8935         }
8936       )
8937       if (simulate_overflow || !_work_queue->push(obj)) {
8938         _collector->par_push_on_overflow_list(obj);
8939         _collector->_par_kac_ovflw++;
8940       }
8941     } // Else another thread got there already
8942   }
8943 }
8944 
8945 void CMSInnerParMarkAndPushClosure::do_oop(oop* p)       { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
8946 void CMSInnerParMarkAndPushClosure::do_oop(narrowOop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
8947 
8948 //////////////////////////////////////////////////////////////////
8949 //  CMSExpansionCause                /////////////////////////////
8950 //////////////////////////////////////////////////////////////////
8951 const char* CMSExpansionCause::to_string(CMSExpansionCause::Cause cause) {
8952   switch (cause) {
8953     case _no_expansion:
8954       return "No expansion";
8955     case _satisfy_free_ratio:
8956       return "Free ratio";
8957     case _satisfy_promotion:
8958       return "Satisfy promotion";
8959     case _satisfy_allocation:
8960       return "Satisfy allocation";
8961     case _allocate_par_lab:
8962       return "Par LAB";
8963     case _allocate_par_spooling_space:
8964       return "Par Spooling Space";
8965     case _adaptive_size_policy:
8966       return "Ergonomics";
8967     default:
8968       return "unknown";
8969   }
8970 }
8971 
8972 void CMSDrainMarkingStackClosure::do_void() {
8973   // the max number to take from overflow list at a time
8974   const size_t num = _mark_stack->capacity()/4;
8975   assert(!_concurrent_precleaning || _collector->overflow_list_is_empty(),
8976          "Overflow list should be NULL during concurrent phases");
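       // Note the loop condition: when the local mark stack drains, we try to
       // refill it from the overflow list; the loop terminates only when both
       // the stack and the overflow list are empty.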
8977   while (!_mark_stack->isEmpty() ||
8978          // if stack is empty, check the overflow list
8979          _collector->take_from_overflow_list(num, _mark_stack)) {
8980     oop obj = _mark_stack->pop();
8981     HeapWord* addr = (HeapWord*)obj;
8982     assert(_span.contains(addr), "Should be within span");
8983     assert(_bit_map->isMarked(addr), "Should be marked");
8984     assert(obj->is_oop(), "Should be an oop");
8985     obj->oop_iterate(_keep_alive);
8986   }
8987 }
8988 
8989 void CMSParDrainMarkingStackClosure::do_void() {
8990   // drain queue
8991   trim_queue(0);
8992 }
8993 
8994 // Trim our work_queue so its length is below max at return
8995 void CMSParDrainMarkingStackClosure::trim_queue(uint max) {
8996   while (_work_queue->size() > max) {
8997     oop new_oop;
8998     if (_work_queue->pop_local(new_oop)) {
8999       assert(new_oop->is_oop(), "Expected an oop");
9000       assert(_bit_map->isMarked((HeapWord*)new_oop),
9001              "no white objects on this stack!");
9002       assert(_span.contains((HeapWord*)new_oop), "Out of bounds oop");
9003       // iterate over the oops in this oop, marking and pushing
9004       // the ones in CMS heap (i.e. in _span).
9005       new_oop->oop_iterate(&_mark_and_push);
9006     }
9007   }
9008 }
9009 
9010 ////////////////////////////////////////////////////////////////////
9011 // Support for Marking Stack Overflow list handling and related code
9012 ////////////////////////////////////////////////////////////////////
9013 // Much of the following code is similar in shape and spirit to the
9014 // code used in ParNewGC. We should try and share that code
9015 // as much as possible in the future.
9016 
9017 #ifndef PRODUCT
9018 // Debugging support for CMSStackOverflowALot
9019 
9020 // It's OK to call this multi-threaded;  the worst thing
9021 // that can happen is that we'll get a bunch of closely
9022 // spaced simulated overflows, but that's OK; in fact it's
9023 // probably good, as it would exercise the overflow code
9024 // under contention.
9025 bool CMSCollector::simulate_overflow() {
9026   if (_overflow_counter-- <= 0) { // just being defensive
9027     _overflow_counter = CMSMarkStackOverflowInterval;
9028     return true;
9029   } else {
9030     return false;
9031   }
9032 }
9033 
9034 bool CMSCollector::par_simulate_overflow() {
9035   return simulate_overflow();
9036 }
9037 #endif
9038 
9039 // Single-threaded
9040 bool CMSCollector::take_from_overflow_list(size_t num, CMSMarkStack* stack) {
9041   assert(stack->isEmpty(), "Expected precondition");
9042   assert(stack->capacity() > num, "Shouldn't bite more than can chew");
9043   size_t i = num;
9044   oop  cur = _overflow_list;
9045   const markOop proto = markOopDesc::prototype();
9046   NOT_PRODUCT(ssize_t n = 0;)
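       // The overflow list is linked through the mark words of its elements;
       // as each object is transferred to the mark stack, restore its mark
       // word to the prototype value.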
9047   for (oop next; i > 0 && cur != NULL; cur = next, i--) {
9048     next = oop(cur->mark());
9049     cur->set_mark(proto);   // until proven otherwise
9050     assert(cur->is_oop(), "Should be an oop");
9051     bool res = stack->push(cur);
9052     assert(res, "Bit off more than can chew?");
9053     NOT_PRODUCT(n++;)
9054   }
9055   _overflow_list = cur;
9056 #ifndef PRODUCT
9057   assert(_num_par_pushes >= n, "Too many pops?");
9058   _num_par_pushes -= n;
9059 #endif
9060   return !stack->isEmpty();
9061 }
9062 
9063 #define BUSY  (oop(0x1aff1aff))
9064 // (MT-safe) Get a prefix of at most "num" from the list.
9065 // The overflow list is chained through the mark word of
9066 // each object in the list. We fetch the entire list,
9067 // break off a prefix of the right size and return the
9068 // remainder. If other threads try to take objects from
9069 // the overflow list at that time, they will wait for
9070 // some time to see if data becomes available. If (and
9071 // only if) another thread places one or more object(s)
9072 // on the global list before we have returned the suffix
9073 // to the global list, we will walk down our local list
9074 // to find its end and append the global list to
9075 // our suffix before returning it. This suffix walk can
9076 // prove to be expensive (quadratic in the amount of traffic)
9077 // when there are many objects in the overflow list and
9078 // there is much producer-consumer contention on the list.
9079 // *NOTE*: The overflow list manipulation code here and
9080 // in ParNewGeneration:: are very similar in shape,
9081 // except that in the ParNew case we use the old (from/eden)
9082 // copy of the object to thread the list via its klass word.
9083 // Because of the common code, if you make any changes in
9084 // the code below, please check the ParNew version to see if
9085 // similar changes might be needed.
9086 // CR 6797058 has been filed to consolidate the common code.
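     // Rough outline of the steps below (the code adds the spin/sleep and
     // NULL/BUSY bookkeeping around them):
     //   1. prefix = Atomic::xchg_ptr(BUSY, &_overflow_list) -- claim the whole list
     //   2. walk at most "num" links (the list is threaded through mark words)
     //   3. detach the remainder, if any, as the suffix
     //   4. try to CAS the suffix back while the global list is still NULL/BUSY;
     //      if another thread got in first, walk to the suffix's tail and splice
     //      the new global list behind it before CASing the result back
     //   5. push the prefix elements onto work_q, restoring prototype marks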
9087 bool CMSCollector::par_take_from_overflow_list(size_t num,
9088                                                OopTaskQueue* work_q,
9089                                                int no_of_gc_threads) {
9090   assert(work_q->size() == 0, "First empty local work queue");
9091   assert(num < work_q->max_elems(), "Can't bite more than we can chew");
9092   if (_overflow_list == NULL) {
9093     return false;
9094   }
9095   // Grab the entire list; we'll put back a suffix
9096   oop prefix = (oop)Atomic::xchg_ptr(BUSY, &_overflow_list);
9097   Thread* tid = Thread::current();
9098   // Before "no_of_gc_threads" was introduced CMSOverflowSpinCount was
9099   // set to ParallelGCThreads.
9100   size_t CMSOverflowSpinCount = (size_t) no_of_gc_threads; // was ParallelGCThreads;
9101   size_t sleep_time_millis = MAX2((size_t)1, num/100);
9102   // If the list is busy, we spin for a short while,
9103   // sleeping between attempts to get the list.
9104   for (size_t spin = 0; prefix == BUSY && spin < CMSOverflowSpinCount; spin++) {
9105     os::sleep(tid, sleep_time_millis, false);
9106     if (_overflow_list == NULL) {
9107       // Nothing left to take
9108       return false;
9109     } else if (_overflow_list != BUSY) {
9110       // Try and grab the prefix
9111       prefix = (oop)Atomic::xchg_ptr(BUSY, &_overflow_list);
9112     }
9113   }
9114   // If the list was found to be empty, or we spun long
9115   // enough, we give up and return empty-handed. If we leave
9116   // the list in the BUSY state below, it must be the case that
9117   // some other thread holds the overflow list and will set it
9118   // to a non-BUSY state in the future.
9119   if (prefix == NULL || prefix == BUSY) {
9120      // Nothing to take or waited long enough
9121      if (prefix == NULL) {
9122        // Write back the NULL in case we overwrote it with BUSY above
9123        // and it is still the same value.
9124        (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
9125      }
9126      return false;
9127   }
9128   assert(prefix != NULL && prefix != BUSY, "Error");
9129   size_t i = num;
9130   oop cur = prefix;
9131   // Walk down the first "num" objects, unless we reach the end.
9132   for (; i > 1 && cur->mark() != NULL; cur = oop(cur->mark()), i--);
9133   if (cur->mark() == NULL) {
9134     // We have "num" or fewer elements in the list, so there
9135     // is nothing to return to the global list.
9136     // Write back the NULL in lieu of the BUSY we wrote
9137     // above, if it is still the same value.
9138     if (_overflow_list == BUSY) {
9139       (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
9140     }
9141   } else {
9142     // Chop off the suffix and return it to the global list.
9143     assert(cur->mark() != BUSY, "Error");
9144     oop suffix_head = cur->mark(); // suffix will be put back on global list
9145     cur->set_mark(NULL);           // break off suffix
9146     // It's possible that the list is still in the empty (BUSY) state
9147     // we left it in a short while ago; in that case we may be
9148     // able to place back the suffix without incurring the cost
9149     // of a walk down the list.
9150     oop observed_overflow_list = _overflow_list;
9151     oop cur_overflow_list = observed_overflow_list;
9152     bool attached = false;
9153     while (observed_overflow_list == BUSY || observed_overflow_list == NULL) {
9154       observed_overflow_list =
9155         (oop) Atomic::cmpxchg_ptr(suffix_head, &_overflow_list, cur_overflow_list);
9156       if (cur_overflow_list == observed_overflow_list) {
9157         attached = true;
9158         break;
9159       } else cur_overflow_list = observed_overflow_list;
9160     }
9161     if (!attached) {
9162       // Too bad, someone else sneaked in (at least) an element; we'll need
9163       // to do a splice. Find tail of suffix so we can prepend suffix to global
9164       // list.
9165       for (cur = suffix_head; cur->mark() != NULL; cur = (oop)(cur->mark()));
9166       oop suffix_tail = cur;
9167       assert(suffix_tail != NULL && suffix_tail->mark() == NULL,
9168              "Tautology");
9169       observed_overflow_list = _overflow_list;
9170       do {
9171         cur_overflow_list = observed_overflow_list;
9172         if (cur_overflow_list != BUSY) {
9173           // Do the splice ...
9174           suffix_tail->set_mark(markOop(cur_overflow_list));
9175         } else { // cur_overflow_list == BUSY
9176           suffix_tail->set_mark(NULL);
9177         }
9178         // ... and try to place spliced list back on overflow_list ...
9179         observed_overflow_list =
9180           (oop) Atomic::cmpxchg_ptr(suffix_head, &_overflow_list, cur_overflow_list);
9181       } while (cur_overflow_list != observed_overflow_list);
9182       // ... until we have succeeded in doing so.
9183     }
9184   }
9185 
9186   // Push the prefix elements on work_q
9187   assert(prefix != NULL, "control point invariant");
9188   const markOop proto = markOopDesc::prototype();
9189   oop next;
9190   NOT_PRODUCT(ssize_t n = 0;)
9191   for (cur = prefix; cur != NULL; cur = next) {
9192     next = oop(cur->mark());
9193     cur->set_mark(proto);   // until proven otherwise
9194     assert(cur->is_oop(), "Should be an oop");
9195     bool res = work_q->push(cur);
9196     assert(res, "Bit off more than we can chew?");
9197     NOT_PRODUCT(n++;)
9198   }
9199 #ifndef PRODUCT
9200   assert(_num_par_pushes >= n, "Too many pops?");
9201   Atomic::add_ptr(-(intptr_t)n, &_num_par_pushes);
9202 #endif
9203   return true;
9204 }
9205 
9206 // Single-threaded
9207 void CMSCollector::push_on_overflow_list(oop p) {
9208   NOT_PRODUCT(_num_par_pushes++;)
9209   assert(p->is_oop(), "Not an oop");
9210   preserve_mark_if_necessary(p);
9211   p->set_mark((markOop)_overflow_list);
9212   _overflow_list = p;
9213 }
9214 
9215 // Multi-threaded; use CAS to prepend to overflow list
9216 void CMSCollector::par_push_on_overflow_list(oop p) {
9217   NOT_PRODUCT(Atomic::inc_ptr(&_num_par_pushes);)
9218   assert(p->is_oop(), "Not an oop");
9219   par_preserve_mark_if_necessary(p);
9220   oop observed_overflow_list = _overflow_list;
9221   oop cur_overflow_list;
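       // CAS-prepend loop: link p to the observed head through its mark word
       // and try to install it as the new head. If the observed head is the
       // BUSY sentinel (some thread has claimed the list), link to NULL instead
       // so that p becomes the sole element; retry until the CAS succeeds.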
9222   do {
9223     cur_overflow_list = observed_overflow_list;
9224     if (cur_overflow_list != BUSY) {
9225       p->set_mark(markOop(cur_overflow_list));
9226     } else {
9227       p->set_mark(NULL);
9228     }
9229     observed_overflow_list =
9230       (oop) Atomic::cmpxchg_ptr(p, &_overflow_list, cur_overflow_list);
9231   } while (cur_overflow_list != observed_overflow_list);
9232 }
9233 #undef BUSY
9234 
9235 // Single threaded
9236 // General Note on GrowableArray: pushes may silently fail
9237 // because we are (temporarily) out of C-heap for expanding
9238 // the stack. The problem is quite ubiquitous and affects
9239 // a lot of code in the JVM. The prudent thing for GrowableArray
9240 // to do (for now) is to exit with an error. However, that may
9241 // be too draconian in some cases because the caller may be
9242 // able to recover without much harm. For such cases, we
9243 // should probably introduce a "soft_push" method which returns
9244 // an indication of success or failure with the assumption that
9245 // the caller may be able to recover from a failure; code in
9246 // the VM can then be changed, incrementally, to deal with such
9247 // failures where possible, thus, incrementally hardening the VM
9248 // in such low resource situations.
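     // (Purely illustrative: such a hypothetical soft_push might be declared as
     //    bool soft_push(const E& elem);  // false on allocation failure, no exit
     //  No such method exists in GrowableArray today.)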
9249 void CMSCollector::preserve_mark_work(oop p, markOop m) {
9250   _preserved_oop_stack.push(p);
9251   _preserved_mark_stack.push(m);
9252   assert(m == p->mark(), "Mark word changed");
9253   assert(_preserved_oop_stack.size() == _preserved_mark_stack.size(),
9254          "bijection");
9255 }
9256 
9257 // Single threaded
9258 void CMSCollector::preserve_mark_if_necessary(oop p) {
9259   markOop m = p->mark();
9260   if (m->must_be_preserved(p)) {
9261     preserve_mark_work(p, m);
9262   }
9263 }
9264 
9265 void CMSCollector::par_preserve_mark_if_necessary(oop p) {
9266   markOop m = p->mark();
9267   if (m->must_be_preserved(p)) {
9268     MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
9269     // Even though we read the mark word without holding
9270     // the lock, we are assured that it will not change
9271     // because we "own" this oop, so no other thread can
9272     // be trying to push it on the overflow list; see
9273     // the assertion in preserve_mark_work() that checks
9274     // that m == p->mark().
9275     preserve_mark_work(p, m);
9276   }
9277 }
9278 
9279 // We should be able to do this multi-threaded,
9280 // a chunk of stack being a task (this is
9281 // correct because each oop only ever appears
9282 // once in the overflow list). However, it's
9283 // not very easy to completely overlap this with
9284 // other operations, so it will generally not be done
9285 // until all work's been completed. Because we
9286 // expect the preserved oop stack (set) to be small,
9287 // it's probably fine to do this single-threaded.
9288 // We can explore cleverer concurrent/overlapped/parallel
9289 // processing of preserved marks if we feel the
9290 // need for this in the future. Stack overflow is
9291 // so rare in practice, and its effect on performance
9292 // when it does happen is so great, that this cost will
9293 // likely just be lost in the noise anyway.
9294 void CMSCollector::restore_preserved_marks_if_any() {
9295   assert(SafepointSynchronize::is_at_safepoint(),
9296          "world should be stopped");
9297   assert(Thread::current()->is_ConcurrentGC_thread() ||
9298          Thread::current()->is_VM_thread(),
9299          "should be single-threaded");
9300   assert(_preserved_oop_stack.size() == _preserved_mark_stack.size(),
9301          "bijection");
9302 
9303   while (!_preserved_oop_stack.is_empty()) {
9304     oop p = _preserved_oop_stack.pop();
9305     assert(p->is_oop(), "Should be an oop");
9306     assert(_span.contains(p), "oop should be in _span");
9307     assert(p->mark() == markOopDesc::prototype(),
9308            "Set when taken from overflow list");
9309     markOop m = _preserved_mark_stack.pop();
9310     p->set_mark(m);
9311   }
9312   assert(_preserved_mark_stack.is_empty() && _preserved_oop_stack.is_empty(),
9313          "stacks were cleared above");
9314 }
9315 
9316 #ifndef PRODUCT
9317 bool CMSCollector::no_preserved_marks() const {
9318   return _preserved_mark_stack.is_empty() && _preserved_oop_stack.is_empty();
9319 }
9320 #endif
9321 
9322 CMSAdaptiveSizePolicy* ASConcurrentMarkSweepGeneration::cms_size_policy() const
9323 {
9324   GenCollectedHeap* gch = (GenCollectedHeap*) GenCollectedHeap::heap();
9325   CMSAdaptiveSizePolicy* size_policy =
9326     (CMSAdaptiveSizePolicy*) gch->gen_policy()->size_policy();
9327   assert(size_policy->is_gc_cms_adaptive_size_policy(),
9328     "Wrong type for size policy");
9329   return size_policy;
9330 }
9331 
9332 void ASConcurrentMarkSweepGeneration::resize(size_t cur_promo_size,
9333                                            size_t desired_promo_size) {
9334   if (cur_promo_size < desired_promo_size) {
9335     size_t expand_bytes = desired_promo_size - cur_promo_size;
9336     if (PrintAdaptiveSizePolicy && Verbose) {
9337       gclog_or_tty->print_cr(" ASConcurrentMarkSweepGeneration::resize "
9338         "Expanding tenured generation by " SIZE_FORMAT " (bytes)",
9339         expand_bytes);
9340     }
9341     expand(expand_bytes,
9342            MinHeapDeltaBytes,
9343            CMSExpansionCause::_adaptive_size_policy);
9344   } else if (desired_promo_size < cur_promo_size) {
9345     size_t shrink_bytes = cur_promo_size - desired_promo_size;
9346     if (PrintAdaptiveSizePolicy && Verbose) {
9347       gclog_or_tty->print_cr(" ASConcurrentMarkSweepGeneration::resize "
9348         "Shrinking tenured generation by " SIZE_FORMAT " (bytes)",
9349         shrink_bytes);
9350     }
9351     shrink(shrink_bytes);
9352   }
9353 }
9354 
9355 CMSGCAdaptivePolicyCounters* ASConcurrentMarkSweepGeneration::gc_adaptive_policy_counters() {
9356   GenCollectedHeap* gch = GenCollectedHeap::heap();
9357   CMSGCAdaptivePolicyCounters* counters =
9358     (CMSGCAdaptivePolicyCounters*) gch->collector_policy()->counters();
9359   assert(counters->kind() == GCPolicyCounters::CMSGCAdaptivePolicyCountersKind,
9360     "Wrong kind of counters");
9361   return counters;
9362 }
9363 
9364 
9365 void ASConcurrentMarkSweepGeneration::update_counters() {
9366   if (UsePerfData) {
9367     _space_counters->update_all();
9368     _gen_counters->update_all();
9369     CMSGCAdaptivePolicyCounters* counters = gc_adaptive_policy_counters();
9371     CMSGCStats* gc_stats_l = (CMSGCStats*) gc_stats();
9372     assert(gc_stats_l->kind() == GCStats::CMSGCStatsKind,
9373       "Wrong gc statistics type");
9374     counters->update_counters(gc_stats_l);
9375   }
9376 }
9377 
9378 void ASConcurrentMarkSweepGeneration::update_counters(size_t used) {
9379   if (UsePerfData) {
9380     _space_counters->update_used(used);
9381     _space_counters->update_capacity();
9382     _gen_counters->update_all();
9383 
9384     CMSGCAdaptivePolicyCounters* counters = gc_adaptive_policy_counters();
9386     CMSGCStats* gc_stats_l = (CMSGCStats*) gc_stats();
9387     assert(gc_stats_l->kind() == GCStats::CMSGCStatsKind,
9388       "Wrong gc statistics type");
9389     counters->update_counters(gc_stats_l);
9390   }
9391 }
9392 
9393 void ASConcurrentMarkSweepGeneration::shrink_by(size_t desired_bytes) {
9394   assert_locked_or_safepoint(Heap_lock);
9395   assert_lock_strong(freelistLock());
9396   HeapWord* old_end = _cmsSpace->end();
9397   HeapWord* unallocated_start = _cmsSpace->unallocated_block();
9398   assert(old_end >= unallocated_start, "Miscalculation of unallocated_start");
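       // Shrinking proceeds in three steps: locate the free chunk at the end of
       // the space, shrink the underlying virtual space by a page-aligned amount
       // bounded by that chunk's size, and then trim the chunk (along with the
       // block offset table, the card table's covered region and the space's
       // end) to match the new committed size.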
9399   FreeChunk* chunk_at_end = find_chunk_at_end();
9400   if (chunk_at_end == NULL) {
9401     // No room to shrink
9402     if (PrintGCDetails && Verbose) {
9403       gclog_or_tty->print_cr("No room to shrink: old_end  "
9404         PTR_FORMAT "  unallocated_start  " PTR_FORMAT
9405         " chunk_at_end  " PTR_FORMAT,
9406         old_end, unallocated_start, chunk_at_end);
9407     }
9408     return;
9409   } else {
9410 
9411     // Find the chunk at the end of the space and determine
9412     // how much it can be shrunk.
9413     size_t shrinkable_size_in_bytes = chunk_at_end->size();
9414     size_t aligned_shrinkable_size_in_bytes =
9415       align_size_down(shrinkable_size_in_bytes, os::vm_page_size());
9416     assert(unallocated_start <= (HeapWord*) chunk_at_end->end(),
9417       "Inconsistent chunk at end of space");
9418     size_t bytes = MIN2(desired_bytes, aligned_shrinkable_size_in_bytes);
9419     size_t word_size_before = heap_word_size(_virtual_space.committed_size());
9420 
9421     // Shrink the underlying space
9422     _virtual_space.shrink_by(bytes);
9423     if (PrintGCDetails && Verbose) {
9424       gclog_or_tty->print_cr("ConcurrentMarkSweepGeneration::shrink_by:"
9425         " desired_bytes " SIZE_FORMAT
9426         " shrinkable_size_in_bytes " SIZE_FORMAT
9427         " aligned_shrinkable_size_in_bytes " SIZE_FORMAT
9428         "  bytes  " SIZE_FORMAT,
9429         desired_bytes, shrinkable_size_in_bytes,
9430         aligned_shrinkable_size_in_bytes, bytes);
9431       gclog_or_tty->print_cr("          old_end  " PTR_FORMAT
9432         "  unallocated_start  " PTR_FORMAT,
9433         old_end, unallocated_start);
9434     }
9435 
9436     // If the space did shrink (shrinking is not guaranteed),
9437     // shrink the chunk at the end by the appropriate amount.
9438     if (((HeapWord*)_virtual_space.high()) < old_end) {
9439       size_t new_word_size =
9440         heap_word_size(_virtual_space.committed_size());
9441 
9442       // Have to remove the chunk from the dictionary because it is changing
9443       // size and might belong somewhere else in the dictionary.
9444 
9445       // Get the chunk at end, shrink it, and put it
9446       // back.
9447       _cmsSpace->removeChunkFromDictionary(chunk_at_end);
9448       size_t word_size_change = word_size_before - new_word_size;
9449       size_t chunk_at_end_old_size = chunk_at_end->size();
9450       assert(chunk_at_end_old_size >= word_size_change,
9451         "Shrink is too large");
9452       chunk_at_end->set_size(chunk_at_end_old_size -
9453                           word_size_change);
9454       _cmsSpace->freed((HeapWord*) chunk_at_end->end(),
9455         word_size_change);
9456 
9457       _cmsSpace->returnChunkToDictionary(chunk_at_end);
9458 
9459       MemRegion mr(_cmsSpace->bottom(), new_word_size);
9460       _bts->resize(new_word_size);  // resize the block offset shared array
9461       Universe::heap()->barrier_set()->resize_covered_region(mr);
9462       _cmsSpace->assert_locked();
9463       _cmsSpace->set_end((HeapWord*)_virtual_space.high());
9464 
9465       NOT_PRODUCT(_cmsSpace->dictionary()->verify());
9466 
9467       // update the space and generation capacity counters
9468       if (UsePerfData) {
9469         _space_counters->update_capacity();
9470         _gen_counters->update_all();
9471       }
9472 
9473       if (Verbose && PrintGCDetails) {
9474         size_t new_mem_size = _virtual_space.committed_size();
9475         size_t old_mem_size = new_mem_size + bytes;
9476         gclog_or_tty->print_cr("Shrinking %s from " SIZE_FORMAT "K by " SIZE_FORMAT "K to " SIZE_FORMAT "K",
9477                       name(), old_mem_size/K, bytes/K, new_mem_size/K);
9478       }
9479     }
9480 
9481     assert(_cmsSpace->unallocated_block() <= _cmsSpace->end(),
9482       "Inconsistency at end of space");
9483     assert(chunk_at_end->end() == (uintptr_t*) _cmsSpace->end(),
9484       "Shrinking is inconsistent");
9485     return;
9486   }
9487 }
9488 // Transfer some number of overflown objects to the usual marking
9489 // stack. Return true if some objects were transferred.
9490 bool MarkRefsIntoAndScanClosure::take_from_overflow_list() {
9491   size_t num = MIN2((size_t)(_mark_stack->capacity() - _mark_stack->length())/4,
9492                     (size_t)ParGCDesiredObjsFromOverflowList);
9493 
9494   bool res = _collector->take_from_overflow_list(num, _mark_stack);
9495   assert(_collector->overflow_list_is_empty() || res,
9496          "If list is not empty, we should have taken something");
9497   assert(!res || !_mark_stack->isEmpty(),
9498          "If we took something, it should now be on our stack");
9499   return res;
9500 }
9501 
9502 size_t MarkDeadObjectsClosure::do_blk(HeapWord* addr) {
9503   size_t res = _sp->block_size_no_stall(addr, _collector);
9504   if (_sp->block_is_obj(addr)) {
9505     if (_live_bit_map->isMarked(addr)) {
9506       // It can't have been dead in a previous cycle
9507       guarantee(!_dead_bit_map->isMarked(addr), "No resurrection!");
9508     } else {
9509       _dead_bit_map->mark(addr);      // mark the dead object
9510     }
9511   }
9512   // Could be 0, if the block size could not be computed without stalling.
9513   return res;
9514 }
9515 
9516 TraceCMSMemoryManagerStats::TraceCMSMemoryManagerStats(CMSCollector::CollectorState phase, GCCause::Cause cause): TraceMemoryManagerStats() {
9517 
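       // A CMS cycle is reported to the memory manager as a single collection:
       // InitialMarking records the GC begin time and pre-GC usage, FinalMarking
       // only accumulates GC time, and Sweeping records the peak and post-GC
       // usage and the GC end time, and counts the collection.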
9518   switch (phase) {
9519     case CMSCollector::InitialMarking:
9520       initialize(true  /* fullGC */,
9521                  cause /* cause of the GC */,
9522                  true  /* recordGCBeginTime */,
9523                  true  /* recordPreGCUsage */,
9524                  false /* recordPeakUsage */,
9525                  false /* recordPostGCUsage */,
9526                  true  /* recordAccumulatedGCTime */,
9527                  false /* recordGCEndTime */,
9528                  false /* countCollection */  );
9529       break;
9530 
9531     case CMSCollector::FinalMarking:
9532       initialize(true  /* fullGC */,
9533                  cause /* cause of the GC */,
9534                  false /* recordGCBeginTime */,
9535                  false /* recordPreGCUsage */,
9536                  false /* recordPeakUsage */,
9537                  false /* recordPostGCUsage */,
9538                  true  /* recordAccumulatedGCTime */,
9539                  false /* recordGCEndTime */,
9540                  false /* countCollection */  );
9541       break;
9542 
9543     case CMSCollector::Sweeping:
9544       initialize(true  /* fullGC */,
9545                  cause /* cause of the GC */,
9546                  false /* recordGCBeginTime */,
9547                  false /* recordPreGCUsage */,
9548                  true  /* recordPeakUsage */,
9549                  true  /* recordPostGCUsage */,
9550                  false /* recordAccumulatedGCTime */,
9551                  true  /* recordGCEndTime */,
9552                  true  /* countCollection */  );
9553       break;
9554 
9555     default:
9556       ShouldNotReachHere();
9557   }
9558 }