/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp"
#include "gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.hpp"
#include "gc_implementation/concurrentMarkSweep/cmsGCAdaptivePolicyCounters.hpp"
#include "gc_implementation/concurrentMarkSweep/cmsOopClosures.inline.hpp"
#include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp"
#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp"
#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
#include "gc_implementation/concurrentMarkSweep/vmCMSOperations.hpp"
#include "gc_implementation/parNew/parNewGeneration.hpp"
#include "gc_implementation/shared/collectorCounters.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "gc_implementation/shared/isGCActiveMark.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "memory/allocation.hpp"
#include "memory/cardTableRS.hpp"
#include "memory/collectorPolicy.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/genMarkSweep.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/iterator.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/vmThread.hpp"
#include "services/memoryService.hpp"
#include "services/runtimeService.hpp"

// statics
CMSCollector* ConcurrentMarkSweepGeneration::_collector = NULL;
bool CMSCollector::_full_gc_requested = false;
GCCause::Cause CMSCollector::_full_gc_cause = GCCause::_no_gc;

//////////////////////////////////////////////////////////////////
// In support of CMS/VM thread synchronization
//////////////////////////////////////////////////////////////////
// We split use of the CGC_lock into 2 "levels".
// The low-level locking is of the usual CGC_lock monitor. We introduce
// a higher level "token" (hereafter "CMS token") built on top of the
// low level monitor (hereafter "CGC lock").
// The token-passing protocol gives priority to the VM thread. The
// CMS-lock doesn't provide any fairness guarantees, but clients
// should ensure that it is only held for very short, bounded
// durations.
//
// When either the CMS thread or the VM thread is involved in
// collection operations during which it does not want the other
// thread to interfere, it obtains the CMS token.
//
// If either thread tries to get the token while the other has
// it, that thread waits. However, if the VM thread and CMS thread
// both want the token, then the VM thread gets priority while the
// CMS thread waits. This ensures, for instance, that the "concurrent"
// phases of the CMS thread's work do not block out the VM thread
// for long periods of time as the CMS thread continues to hog
// the token. (See bug 4616232).
//
// The baton-passing functions are, however, controlled by the
// flags _foregroundGCShouldWait and _foregroundGCIsActive,
// and here the low-level CMS lock, not the high level token,
// ensures mutual exclusion.
//
// Two important conditions that we have to satisfy:
// 1. if a thread does a low-level wait on the CMS lock, then it
//    relinquishes the CMS token if it was holding that token
//    when it acquired the low-level CMS lock.
// 2. any low-level notifications on the low-level lock
//    should only be sent when a thread has relinquished the token.
//
// In the absence of either property, we'd have potential deadlock.
//
// We protect each of the CMS (concurrent and sequential) phases
// with the CMS _token_, not the CMS _lock_.
//
// The only code protected by the CMS lock is the token acquisition code
// itself, see ConcurrentMarkSweepThread::[de]synchronize(), and the
// baton-passing code.
//
// Unfortunately, I couldn't come up with a good abstraction to factor and
// hide the naked CGC_lock manipulation in the baton-passing code
// further below. That's something we should try to do. Also, the proof
// of correctness of this 2-level locking scheme is far from obvious,
// and potentially quite slippery. We have an uneasy suspicion, for instance,
// that there may be a theoretical possibility of delay/starvation in the
// low-level lock/wait/notify scheme used for the baton-passing because of
// potential interference with the priority scheme embodied in the
// CMS-token-passing protocol. See related comments at a CGC_lock->wait()
// invocation further below and marked with "XXX 20011219YSR".
// Indeed, as we note elsewhere, this may become yet more slippery
// in the presence of multiple CMS and/or multiple VM threads. XXX
class CMSTokenSync: public StackObj {
 private:
  bool _is_cms_thread;
 public:
  CMSTokenSync(bool is_cms_thread):
    _is_cms_thread(is_cms_thread) {
    assert(is_cms_thread == Thread::current()->is_ConcurrentGC_thread(),
           "Incorrect argument to constructor");
    ConcurrentMarkSweepThread::synchronize(_is_cms_thread);
  }

  ~CMSTokenSync() {
    assert(_is_cms_thread ?
             ConcurrentMarkSweepThread::cms_thread_has_cms_token() :
             ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
          "Incorrect state");
    ConcurrentMarkSweepThread::desynchronize(_is_cms_thread);
  }
};
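
// Illustrative sketch only (not product code): the intended usage
// pattern for CMSTokenSync is to wrap each phase of work in a scoped
// instance, so the token is held for the phase and released on exit.
// The phase method named below is hypothetical.
//
//   void CMSCollector::some_concurrent_phase() {
//     CMSTokenSync ts(true /* is_cms_thread */);  // acquire the CMS token
//     // ... do the phase's work while holding the token ...
//   }                                             // token released here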

// Convenience class that does a CMSTokenSync, and then acquires
// up to three locks.
class CMSTokenSyncWithLocks: public CMSTokenSync {
 private:
  // Note: locks are acquired in textual declaration order
  // and released in the opposite order
  MutexLockerEx _locker1, _locker2, _locker3;
 public:
  CMSTokenSyncWithLocks(bool is_cms_thread, Mutex* mutex1,
                        Mutex* mutex2 = NULL, Mutex* mutex3 = NULL):
    CMSTokenSync(is_cms_thread),
    _locker1(mutex1, Mutex::_no_safepoint_check_flag),
    _locker2(mutex2, Mutex::_no_safepoint_check_flag),
    _locker3(mutex3, Mutex::_no_safepoint_check_flag)
  { }
};
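
// Illustrative sketch only (not product code): acquiring the CMS token
// and then one or more locks in a single scoped object. The lock
// accessor used here is just an example.
//
//   {
//     CMSTokenSyncWithLocks ts(true /* is_cms_thread */, freelistLock());
//     // ... work requiring both the token and the freelist lock ...
//   }  // locks released first, then the CMS token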


// Wrapper class to temporarily disable icms during a foreground cms collection.
class ICMSDisabler: public StackObj {
 public:
  // The ctor disables icms and wakes up the thread so it notices the change;
  // the dtor re-enables icms.  Note that the CMSCollector methods will check
  // CMSIncrementalMode.
  ICMSDisabler()  { CMSCollector::disable_icms(); CMSCollector::start_icms(); }
  ~ICMSDisabler() { CMSCollector::enable_icms(); }
};

//////////////////////////////////////////////////////////////////
//  Concurrent Mark-Sweep Generation /////////////////////////////
//////////////////////////////////////////////////////////////////

NOT_PRODUCT(CompactibleFreeListSpace* debug_cms_space;)

// This struct contains per-thread things necessary to support parallel
// young-gen collection.
class CMSParGCThreadState: public CHeapObj<mtGC> {
 public:
  CFLS_LAB lab;
  PromotionInfo promo;

  // Constructor.
  CMSParGCThreadState(CompactibleFreeListSpace* cfls) : lab(cfls) {
    promo.setSpace(cfls);
  }
};

ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
     ReservedSpace rs, size_t initial_byte_size, int level,
     CardTableRS* ct, bool use_adaptive_freelists,
     FreeBlockDictionary<FreeChunk>::DictionaryChoice dictionaryChoice) :
  CardGeneration(rs, initial_byte_size, level, ct),
  _dilatation_factor(((double)MinChunkSize)/((double)(CollectedHeap::min_fill_size()))),
  _debug_collection_type(Concurrent_collection_type)
{
  HeapWord* bottom = (HeapWord*) _virtual_space.low();
  HeapWord* end    = (HeapWord*) _virtual_space.high();

  _direct_allocated_words = 0;
  NOT_PRODUCT(
    _numObjectsPromoted = 0;
    _numWordsPromoted = 0;
    _numObjectsAllocated = 0;
    _numWordsAllocated = 0;
  )

  _cmsSpace = new CompactibleFreeListSpace(_bts, MemRegion(bottom, end),
                                           use_adaptive_freelists,
                                           dictionaryChoice);
  NOT_PRODUCT(debug_cms_space = _cmsSpace;)
  if (_cmsSpace == NULL) {
    vm_exit_during_initialization(
      "CompactibleFreeListSpace allocation failure");
  }
  _cmsSpace->_gen = this;

  _gc_stats = new CMSGCStats();

  // Verify the assumption that FreeChunk::_prev and OopDesc::_klass
  // offsets match. The ability to tell free chunks from objects
  // depends on this property.
  debug_only(
    FreeChunk* junk = NULL;
    assert(UseCompressedOops ||
           junk->prev_addr() == (void*)(oop(junk)->klass_addr()),
           "Offset of FreeChunk::_prev within FreeChunk must match"
           "  that of OopDesc::_klass within OopDesc");
  )
  if (CollectedHeap::use_parallel_gc_threads()) {
    typedef CMSParGCThreadState* CMSParGCThreadStatePtr;
    _par_gc_thread_states =
      NEW_C_HEAP_ARRAY(CMSParGCThreadStatePtr, ParallelGCThreads, mtGC);
    if (_par_gc_thread_states == NULL) {
      vm_exit_during_initialization("Could not allocate par gc structs");
    }
    for (uint i = 0; i < ParallelGCThreads; i++) {
      _par_gc_thread_states[i] = new CMSParGCThreadState(cmsSpace());
      if (_par_gc_thread_states[i] == NULL) {
        vm_exit_during_initialization("Could not allocate par gc structs");
      }
    }
  } else {
    _par_gc_thread_states = NULL;
  }
  _incremental_collection_failed = false;
  // The "dilatation_factor" is the expansion that can occur on
  // account of the fact that the minimum object size in the CMS
  // generation may be larger than that in, say, a contiguous young
  // generation.
  // Ideally, in the calculation below, we'd compute the dilatation
  // factor as: MinChunkSize/(promoting_gen's min object size)
  // Since we do not have such a general query interface for the
  // promoting generation, we'll instead just use the minimum
  // object size (which today is a header's worth of space);
  // note that all arithmetic is in units of HeapWords.
  assert(MinChunkSize >= CollectedHeap::min_fill_size(), "just checking");
  assert(_dilatation_factor >= 1.0, "from previous assert");
}


// The field "_initiating_occupancy" represents the occupancy percentage
// at which we trigger a new collection cycle.  Unless explicitly specified
// via CMSInitiating[Perm]OccupancyFraction (argument "io" below), it
// is calculated by:
//
//   Let "f" be MinHeapFreeRatio; then
//
//    _initiating_occupancy = 100-f +
//                            f * (CMSTrigger[Perm]Ratio/100)
//   where CMSTrigger[Perm]Ratio is the argument "tr" below.
//
// That is, if we assume the heap is at its desired maximum occupancy at the
// end of a collection, we let CMSTrigger[Perm]Ratio of the (purported) free
// space be allocated before initiating a new collection cycle.
//
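// Worked example (illustrative values only): with MinHeapFreeRatio
// f = 40, CMSTriggerRatio tr = 80, and no explicit
// CMSInitiatingOccupancyFraction (io < 0):
//
//   _initiating_occupancy = (100 - 40) + 40 * 80/100 = 60 + 32 = 92%
//
// i.e. a new cycle is initiated once the generation is 92% occupied.
//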
void ConcurrentMarkSweepGeneration::init_initiating_occupancy(intx io, intx tr) {
  assert(io <= 100 && tr >= 0 && tr <= 100, "Check the arguments");
  if (io >= 0) {
    _initiating_occupancy = (double)io / 100.0;
  } else {
    _initiating_occupancy = ((100 - MinHeapFreeRatio) +
                             (double)(tr * MinHeapFreeRatio) / 100.0)
                            / 100.0;
  }
}

void ConcurrentMarkSweepGeneration::ref_processor_init() {
  assert(collector() != NULL, "no collector");
  collector()->ref_processor_init();
}

void CMSCollector::ref_processor_init() {
  if (_ref_processor == NULL) {
    // Allocate and initialize a reference processor
    _ref_processor =
      new ReferenceProcessor(_span,                               // span
                             (ParallelGCThreads > 1) && ParallelRefProcEnabled, // mt processing
                             (int) ParallelGCThreads,             // mt processing degree
                             _cmsGen->refs_discovery_is_mt(),     // mt discovery
                             (int) MAX2(ConcGCThreads, ParallelGCThreads), // mt discovery degree
                             _cmsGen->refs_discovery_is_atomic(), // whether discovery is atomic
                             &_is_alive_closure,                  // closure for liveness info
                             false);                              // next field updates do not need write barrier
    // Initialize the _ref_processor field of CMSGen
    _cmsGen->set_ref_processor(_ref_processor);

    // Allocate a dummy ref processor for perm gen.
    ReferenceProcessor* rp2 = new ReferenceProcessor();
    if (rp2 == NULL) {
      vm_exit_during_initialization("Could not allocate ReferenceProcessor object");
    }
    _permGen->set_ref_processor(rp2);
  }
}

CMSAdaptiveSizePolicy* CMSCollector::size_policy() {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  assert(gch->kind() == CollectedHeap::GenCollectedHeap,
    "Wrong type of heap");
  CMSAdaptiveSizePolicy* sp = (CMSAdaptiveSizePolicy*)
    gch->gen_policy()->size_policy();
  assert(sp->is_gc_cms_adaptive_size_policy(),
    "Wrong type of size policy");
  return sp;
}

CMSGCAdaptivePolicyCounters* CMSCollector::gc_adaptive_policy_counters() {
  CMSGCAdaptivePolicyCounters* results =
    (CMSGCAdaptivePolicyCounters*) collector_policy()->counters();
  assert(
    results->kind() == GCPolicyCounters::CMSGCAdaptivePolicyCountersKind,
    "Wrong gc policy counter kind");
  return results;
}


void ConcurrentMarkSweepGeneration::initialize_performance_counters() {

  const char* gen_name = "old";

  // Generation Counters - generation 1, 1 subspace
  _gen_counters = new GenerationCounters(gen_name, 1, 1, &_virtual_space);

  _space_counters = new GSpaceCounters(gen_name, 0,
                                       _virtual_space.reserved_size(),
                                       this, _gen_counters);
}

CMSStats::CMSStats(ConcurrentMarkSweepGeneration* cms_gen, unsigned int alpha):
  _cms_gen(cms_gen)
{
  assert(alpha <= 100, "bad value");
  _saved_alpha = alpha;

  // Initialize the alphas to the bootstrap value of 100.
  _gc0_alpha = _cms_alpha = 100;

  _cms_begin_time.update();
  _cms_end_time.update();

  _gc0_duration = 0.0;
  _gc0_period = 0.0;
  _gc0_promoted = 0;

  _cms_duration = 0.0;
  _cms_period = 0.0;
  _cms_allocated = 0;

  _cms_used_at_gc0_begin = 0;
  _cms_used_at_gc0_end = 0;
  _allow_duty_cycle_reduction = false;
  _valid_bits = 0;
  _icms_duty_cycle = CMSIncrementalDutyCycle;
}

double CMSStats::cms_free_adjustment_factor(size_t free) const {
  // TBD: CR 6909490
  return 1.0;
}

void CMSStats::adjust_cms_free_adjustment_factor(bool fail, size_t free) {
}

// Estimate the time until the cms generation is full. If promotion
// failure handling is on, use the padded average size of promotions
// per young generation collection as the expected promotion.
double CMSStats::time_until_cms_gen_full() const {
  size_t cms_free = _cms_gen->cmsSpace()->free();
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  size_t expected_promotion = MIN2(gch->get_gen(0)->capacity(),
                                   (size_t) _cms_gen->gc_stats()->avg_promoted()->padded_average());
  if (cms_free > expected_promotion) {
    // Start a cms collection if there isn't enough space to promote
    // for the next minor collection.  Use the padded average as
    // a safety factor.
    cms_free -= expected_promotion;

    // Adjust by the safety factor.
    double cms_free_dbl = (double)cms_free;
    double cms_adjustment = (100.0 - CMSIncrementalSafetyFactor)/100.0;
    // Apply a further correction factor which tries to adjust
    // for recent occurrence of concurrent mode failures.
    cms_adjustment = cms_adjustment * cms_free_adjustment_factor(cms_free);
    cms_free_dbl = cms_free_dbl * cms_adjustment;

    if (PrintGCDetails && Verbose) {
      gclog_or_tty->print_cr("CMSStats::time_until_cms_gen_full: cms_free "
        SIZE_FORMAT " expected_promotion " SIZE_FORMAT,
        cms_free, expected_promotion);
      gclog_or_tty->print_cr("  cms_free_dbl %f cms_consumption_rate %f",
        cms_free_dbl, cms_consumption_rate() + 1.0);
    }
    // Add 1 in case the consumption rate goes to zero.
    return cms_free_dbl / (cms_consumption_rate() + 1.0);
  }
  return 0.0;
}
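
// Worked example for time_until_cms_gen_full() (illustrative numbers
// only): with cms_free = 120M, expected_promotion = 20M and
// CMSIncrementalSafetyFactor = 10, the adjusted free space is
// (120M - 20M) * 0.90 = 90M; at a consumption rate of 8M/sec the
// estimate is 90M / (8M/sec + 1) ~= 11 seconds until the generation
// is full.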

// Compare the duration of the cms collection to the
// time remaining before the cms generation is full.
// Note that the time from the start of the cms collection
// to the start of the cms sweep (which is less than the total
// duration of the cms collection) could be used instead. That
// was tried, and some applications experienced promotion
// failures early in execution, possibly because those averages
// were not accurate enough at the beginning.
double CMSStats::time_until_cms_start() const {
  // We add "gc0_period" to the "work" calculation
  // below because this query is done (mostly) at the
  // end of a scavenge, so we need to conservatively
  // account for that much possible delay
  // in the query so as to avoid concurrent mode failures
  // due to starting the collection just a wee bit too
  // late.
  double work = cms_duration() + gc0_period();
  double deadline = time_until_cms_gen_full();
  // If the expected work exceeds the time remaining before the
  // generation fills up, start a collection immediately.
  if (work > deadline) {
    if (Verbose && PrintGCDetails) {
      gclog_or_tty->print(
        " CMSCollector: collect because of anticipated promotion "
        "before full %3.7f + %3.7f > %3.7f ", cms_duration(),
        gc0_period(), time_until_cms_gen_full());
    }
    return 0.0;
  }
  return deadline - work;
}
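
// Worked example for time_until_cms_start() (illustrative numbers
// only): if a cms cycle typically takes cms_duration() = 4 sec,
// scavenges arrive every gc0_period() = 1 sec, and
// time_until_cms_gen_full() = 12 sec, then work = 5 sec is within the
// deadline and the start of the next cycle can be deferred for about
// 12 - 5 = 7 seconds.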

// Return a duty cycle based on old_duty_cycle and new_duty_cycle, limiting the
// amount of change to prevent wild oscillation.
unsigned int CMSStats::icms_damped_duty_cycle(unsigned int old_duty_cycle,
                                              unsigned int new_duty_cycle) {
  assert(old_duty_cycle <= 100, "bad input value");
  assert(new_duty_cycle <= 100, "bad input value");

  // Note:  use subtraction with caution since it may underflow (values are
  // unsigned).  Addition is safe since we're in the range 0-100.
  unsigned int damped_duty_cycle = new_duty_cycle;
  if (new_duty_cycle < old_duty_cycle) {
    const unsigned int largest_delta = MAX2(old_duty_cycle / 4, 5U);
    if (new_duty_cycle + largest_delta < old_duty_cycle) {
      damped_duty_cycle = old_duty_cycle - largest_delta;
    }
  } else if (new_duty_cycle > old_duty_cycle) {
    const unsigned int largest_delta = MAX2(old_duty_cycle / 4, 15U);
    if (new_duty_cycle > old_duty_cycle + largest_delta) {
      damped_duty_cycle = MIN2(old_duty_cycle + largest_delta, 100U);
    }
  }
  assert(damped_duty_cycle <= 100, "invalid duty cycle computed");

  if (CMSTraceIncrementalPacing) {
    gclog_or_tty->print(" [icms_damped_duty_cycle(%d,%d) = %d] ",
                           old_duty_cycle, new_duty_cycle, damped_duty_cycle);
  }
  return damped_duty_cycle;
}
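
// Worked example for icms_damped_duty_cycle() (illustrative numbers
// only): dropping from old_duty_cycle = 60 to new_duty_cycle = 10 is
// limited by largest_delta = MAX2(60/4, 5U) = 15, so the damped result
// is 60 - 15 = 45. Rising from 40 to 90 is limited by
// largest_delta = MAX2(40/4, 15U) = 15, giving MIN2(40 + 15, 100U) = 55.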

unsigned int CMSStats::icms_update_duty_cycle_impl() {
  assert(CMSIncrementalPacing && valid(),
         "should be handled in icms_update_duty_cycle()");

  double cms_time_so_far = cms_timer().seconds();
  double scaled_duration = cms_duration_per_mb() * _cms_used_at_gc0_end / M;
  double scaled_duration_remaining = fabsd(scaled_duration - cms_time_so_far);

  // Avoid division by 0.
  double time_until_full = MAX2(time_until_cms_gen_full(), 0.01);
  double duty_cycle_dbl = 100.0 * scaled_duration_remaining / time_until_full;

  unsigned int new_duty_cycle = MIN2((unsigned int)duty_cycle_dbl, 100U);
  if (new_duty_cycle > _icms_duty_cycle) {
    // Avoid very small duty cycles (1 or 2); 0 is allowed.
    if (new_duty_cycle > 2) {
      _icms_duty_cycle = icms_damped_duty_cycle(_icms_duty_cycle,
                                                new_duty_cycle);
    }
  } else if (_allow_duty_cycle_reduction) {
    // The duty cycle is reduced only once per cms cycle (see record_cms_end()).
    new_duty_cycle = icms_damped_duty_cycle(_icms_duty_cycle, new_duty_cycle);
    // Respect the minimum duty cycle.
    unsigned int min_duty_cycle = (unsigned int)CMSIncrementalDutyCycleMin;
    _icms_duty_cycle = MAX2(new_duty_cycle, min_duty_cycle);
  }

  if (PrintGCDetails || CMSTraceIncrementalPacing) {
    gclog_or_tty->print(" icms_dc=%d ", _icms_duty_cycle);
  }

  _allow_duty_cycle_reduction = false;
  return _icms_duty_cycle;
}

#ifndef PRODUCT
void CMSStats::print_on(outputStream *st) const {
  st->print(" gc0_alpha=%d,cms_alpha=%d", _gc0_alpha, _cms_alpha);
  st->print(",gc0_dur=%g,gc0_per=%g,gc0_promo=" SIZE_FORMAT,
               gc0_duration(), gc0_period(), gc0_promoted());
  st->print(",cms_dur=%g,cms_dur_per_mb=%g,cms_per=%g,cms_alloc=" SIZE_FORMAT,
            cms_duration(), cms_duration_per_mb(),
            cms_period(), cms_allocated());
  st->print(",cms_since_beg=%g,cms_since_end=%g",
            cms_time_since_begin(), cms_time_since_end());
  st->print(",cms_used_beg=" SIZE_FORMAT ",cms_used_end=" SIZE_FORMAT,
            _cms_used_at_gc0_begin, _cms_used_at_gc0_end);
  if (CMSIncrementalMode) {
    st->print(",dc=%d", icms_duty_cycle());
  }

  if (valid()) {
    st->print(",promo_rate=%g,cms_alloc_rate=%g",
              promotion_rate(), cms_allocation_rate());
    st->print(",cms_consumption_rate=%g,time_until_full=%g",
              cms_consumption_rate(), time_until_cms_gen_full());
  }
  st->print(" ");
}
#endif // #ifndef PRODUCT

CMSCollector::CollectorState CMSCollector::_collectorState =
                             CMSCollector::Idling;
bool CMSCollector::_foregroundGCIsActive = false;
bool CMSCollector::_foregroundGCShouldWait = false;

CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
                           ConcurrentMarkSweepGeneration* permGen,
                           CardTableRS*                   ct,
                           ConcurrentMarkSweepPolicy*     cp):
  _cmsGen(cmsGen),
  _permGen(permGen),
  _ct(ct),
  _ref_processor(NULL),    // will be set later
  _conc_workers(NULL),     // may be set later
  _abort_preclean(false),
  _start_sampling(false),
  _between_prologue_and_epilogue(false),
  _markBitMap(0, Mutex::leaf + 1, "CMS_markBitMap_lock"),
  _perm_gen_verify_bit_map(0, -1 /* no mutex */, "No_lock"),
  _modUnionTable((CardTableModRefBS::card_shift - LogHeapWordSize),
                 -1 /* lock-free */, "No_lock" /* dummy */),
  _modUnionClosure(&_modUnionTable),
  _modUnionClosurePar(&_modUnionTable),
  // Adjust my span to cover old (cms) gen and perm gen
  _span(cmsGen->reserved()._union(permGen->reserved())),
  // Construct the is_alive_closure with _span & markBitMap
  _is_alive_closure(_span, &_markBitMap),
  _restart_addr(NULL),
  _overflow_list(NULL),
  _stats(cmsGen),
  _eden_chunk_array(NULL),     // may be set in ctor body
  _eden_chunk_capacity(0),     // -- ditto --
  _eden_chunk_index(0),        // -- ditto --
  _survivor_plab_array(NULL),  // -- ditto --
  _survivor_chunk_array(NULL), // -- ditto --
  _survivor_chunk_capacity(0), // -- ditto --
  _survivor_chunk_index(0),    // -- ditto --
  _ser_pmc_preclean_ovflw(0),
  _ser_kac_preclean_ovflw(0),
  _ser_pmc_remark_ovflw(0),
  _par_pmc_remark_ovflw(0),
  _ser_kac_ovflw(0),
  _par_kac_ovflw(0),
#ifndef PRODUCT
  _num_par_pushes(0),
#endif
  _collection_count_start(0),
  _verifying(false),
  _icms_start_limit(NULL),
  _icms_stop_limit(NULL),
  _verification_mark_bm(0, Mutex::leaf + 1, "CMS_verification_mark_bm_lock"),
  _completed_initialization(false),
  _collector_policy(cp),
  _should_unload_classes(false),
  _concurrent_cycles_since_last_unload(0),
  _roots_scanning_options(0),
  _inter_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
  _intra_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
  _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) CMSTracer()),
  _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
  _cms_start_registered(false)
{
  if (ExplicitGCInvokesConcurrentAndUnloadsClasses) {
    ExplicitGCInvokesConcurrent = true;
  }
  // Now expand the span and allocate the collection support structures
  // (MUT, marking bit map etc.) to cover both generations subject to
  // collection.

  // First check that _permGen is adjacent to _cmsGen and above it.
  assert(   _cmsGen->reserved().word_size()  > 0
         && _permGen->reserved().word_size() > 0,
         "generations should not be of zero size");
  assert(_cmsGen->reserved().intersection(_permGen->reserved()).is_empty(),
         "_cmsGen and _permGen should not overlap");
  assert(_cmsGen->reserved().end() == _permGen->reserved().start(),
         "_cmsGen->end() different from _permGen->start()");

  // For use by dirty card to oop closures.
  _cmsGen->cmsSpace()->set_collector(this);
  _permGen->cmsSpace()->set_collector(this);

  // Allocate MUT and marking bit map
  {
    MutexLockerEx x(_markBitMap.lock(), Mutex::_no_safepoint_check_flag);
    if (!_markBitMap.allocate(_span)) {
      warning("Failed to allocate CMS Bit Map");
      return;
    }
    assert(_markBitMap.covers(_span), "_markBitMap inconsistency?");
  }
  {
    _modUnionTable.allocate(_span);
    assert(_modUnionTable.covers(_span), "_modUnionTable inconsistency?");
  }

  if (!_markStack.allocate(MarkStackSize)) {
    warning("Failed to allocate CMS Marking Stack");
    return;
  }
  if (!_revisitStack.allocate(CMSRevisitStackSize)) {
    warning("Failed to allocate CMS Revisit Stack");
    return;
  }

  // Support for multi-threaded concurrent phases
  if (CMSConcurrentMTEnabled) {
    if (FLAG_IS_DEFAULT(ConcGCThreads)) {
      // just for now
      FLAG_SET_DEFAULT(ConcGCThreads, (ParallelGCThreads + 3)/4);
    }
    if (ConcGCThreads > 1) {
      _conc_workers = new YieldingFlexibleWorkGang("Parallel CMS Threads",
                                 ConcGCThreads, true);
      if (_conc_workers == NULL) {
        warning("GC/CMS: _conc_workers allocation failure: "
              "forcing -CMSConcurrentMTEnabled");
        CMSConcurrentMTEnabled = false;
      } else {
        _conc_workers->initialize_workers();
      }
    } else {
      CMSConcurrentMTEnabled = false;
    }
  }
  if (!CMSConcurrentMTEnabled) {
    ConcGCThreads = 0;
  } else {
    // Turn off CMSCleanOnEnter optimization temporarily for
    // the MT case where it's not fixed yet; see 6178663.
    CMSCleanOnEnter = false;
  }
  assert((_conc_workers != NULL) == (ConcGCThreads > 1),
         "Inconsistency");

  // Parallel task queues; these are shared for the
  // concurrent and stop-world phases of CMS, but
  // are not shared with parallel scavenge (ParNew).
  {
    uint i;
    uint num_queues = (uint) MAX2(ParallelGCThreads, ConcGCThreads);

    if ((CMSParallelRemarkEnabled || CMSConcurrentMTEnabled
         || ParallelRefProcEnabled)
        && num_queues > 0) {
      _task_queues = new OopTaskQueueSet(num_queues);
      if (_task_queues == NULL) {
        warning("task_queues allocation failure.");
        return;
      }
      _hash_seed = NEW_C_HEAP_ARRAY(int, num_queues, mtGC);
      if (_hash_seed == NULL) {
        warning("_hash_seed array allocation failure");
        return;
      }

      typedef Padded<OopTaskQueue> PaddedOopTaskQueue;
      for (i = 0; i < num_queues; i++) {
        PaddedOopTaskQueue *q = new PaddedOopTaskQueue();
        if (q == NULL) {
          warning("work_queue allocation failure.");
          return;
        }
        _task_queues->register_queue(i, q);
      }
      for (i = 0; i < num_queues; i++) {
        _task_queues->queue(i)->initialize();
        _hash_seed[i] = 17;  // copied from ParNew
      }
    }
  }

  _cmsGen ->init_initiating_occupancy(CMSInitiatingOccupancyFraction, CMSTriggerRatio);
  _permGen->init_initiating_occupancy(CMSInitiatingPermOccupancyFraction, CMSTriggerPermRatio);

  // Clip CMSBootstrapOccupancy between 0 and 100.
  _bootstrap_occupancy = ((double)MIN2((uintx)100, MAX2((uintx)0, CMSBootstrapOccupancy)))
                         /(double)100;

  _full_gcs_since_conc_gc = 0;

  // Now tell CMS generations the identity of their collector
  ConcurrentMarkSweepGeneration::set_collector(this);

  // Create & start a CMS thread for this CMS collector
  _cmsThread = ConcurrentMarkSweepThread::start(this);
  assert(cmsThread() != NULL, "CMS Thread should have been created");
  assert(cmsThread()->collector() == this,
         "CMS Thread should refer to this gen");
  assert(CGC_lock != NULL, "Where's the CGC_lock?");

  // Support for parallelizing young gen rescan
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  _young_gen = gch->prev_gen(_cmsGen);
  if (gch->supports_inline_contig_alloc()) {
    _top_addr = gch->top_addr();
    _end_addr = gch->end_addr();
    assert(_young_gen != NULL, "no _young_gen");
    _eden_chunk_index = 0;
    _eden_chunk_capacity = (_young_gen->max_capacity()+CMSSamplingGrain)/CMSSamplingGrain;
    _eden_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, _eden_chunk_capacity, mtGC);
    if (_eden_chunk_array == NULL) {
      _eden_chunk_capacity = 0;
      warning("GC/CMS: _eden_chunk_array allocation failure");
    }
  }
  assert(_eden_chunk_array != NULL || _eden_chunk_capacity == 0, "Error");

  // Support for parallelizing survivor space rescan
  if (CMSParallelRemarkEnabled && CMSParallelSurvivorRemarkEnabled) {
    const size_t max_plab_samples =
      ((DefNewGeneration*)_young_gen)->max_survivor_size()/MinTLABSize;

    _survivor_plab_array  = NEW_C_HEAP_ARRAY(ChunkArray, ParallelGCThreads, mtGC);
    _survivor_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, 2*max_plab_samples, mtGC);
    _cursor               = NEW_C_HEAP_ARRAY(size_t, ParallelGCThreads, mtGC);
    if (_survivor_plab_array == NULL || _survivor_chunk_array == NULL
        || _cursor == NULL) {
      warning("Failed to allocate survivor plab/chunk array");
      if (_survivor_plab_array  != NULL) {
        FREE_C_HEAP_ARRAY(ChunkArray, _survivor_plab_array, mtGC);
        _survivor_plab_array = NULL;
      }
      if (_survivor_chunk_array != NULL) {
        FREE_C_HEAP_ARRAY(HeapWord*, _survivor_chunk_array, mtGC);
        _survivor_chunk_array = NULL;
      }
      if (_cursor != NULL) {
        FREE_C_HEAP_ARRAY(size_t, _cursor, mtGC);
        _cursor = NULL;
      }
    } else {
      _survivor_chunk_capacity = 2*max_plab_samples;
      for (uint i = 0; i < ParallelGCThreads; i++) {
        HeapWord** vec = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples, mtGC);
        if (vec == NULL) {
          warning("Failed to allocate survivor plab array");
          for (int j = i; j > 0; j--) {
            FREE_C_HEAP_ARRAY(HeapWord*, _survivor_plab_array[j-1].array(), mtGC);
          }
          FREE_C_HEAP_ARRAY(ChunkArray, _survivor_plab_array, mtGC);
          FREE_C_HEAP_ARRAY(HeapWord*, _survivor_chunk_array, mtGC);
          _survivor_plab_array = NULL;
          _survivor_chunk_array = NULL;
          _survivor_chunk_capacity = 0;
          break;
        } else {
          ChunkArray* cur =
            ::new (&_survivor_plab_array[i]) ChunkArray(vec,
                                                        max_plab_samples);
          assert(cur->end() == 0, "Should be 0");
          assert(cur->array() == vec, "Should be vec");
          assert(cur->capacity() == max_plab_samples, "Error");
        }
      }
    }
  }
  assert(   (   _survivor_plab_array  != NULL
             && _survivor_chunk_array != NULL)
         || (   _survivor_chunk_capacity == 0
             && _survivor_chunk_index == 0),
         "Error");

  // Choose what strong roots should be scanned depending on verification options
  // and perm gen collection mode.
  if (!CMSClassUnloadingEnabled) {
    // If class unloading is disabled we want to include all classes into the root set.
    add_root_scanning_option(SharedHeap::SO_AllClasses);
  } else {
    add_root_scanning_option(SharedHeap::SO_SystemClasses);
  }

  NOT_PRODUCT(_overflow_counter = CMSMarkStackOverflowInterval;)
  _gc_counters = new CollectorCounters("CMS", 1);
  _completed_initialization = true;
  _inter_sweep_timer.start();  // start of time
}

const char* ConcurrentMarkSweepGeneration::name() const {
  return "concurrent mark-sweep generation";
}
void ConcurrentMarkSweepGeneration::update_counters() {
  if (UsePerfData) {
    _space_counters->update_all();
    _gen_counters->update_all();
  }
}

// This is an optimized version of update_counters(); it takes the
// used value as a parameter rather than computing it.
//
void ConcurrentMarkSweepGeneration::update_counters(size_t used) {
  if (UsePerfData) {
    _space_counters->update_used(used);
    _space_counters->update_capacity();
    _gen_counters->update_all();
  }
}

void ConcurrentMarkSweepGeneration::print() const {
  Generation::print();
  cmsSpace()->print();
}

#ifndef PRODUCT
void ConcurrentMarkSweepGeneration::print_statistics() {
  cmsSpace()->printFLCensus(0);
}
#endif

void ConcurrentMarkSweepGeneration::printOccupancy(const char *s) {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  if (PrintGCDetails) {
    if (Verbose) {
      gclog_or_tty->print(" [%d %s-%s: "SIZE_FORMAT"("SIZE_FORMAT")]",
        level(), short_name(), s, used(), capacity());
    } else {
      gclog_or_tty->print(" [%d %s-%s: "SIZE_FORMAT"K("SIZE_FORMAT"K)]",
        level(), short_name(), s, used() / K, capacity() / K);
    }
  }
  if (Verbose) {
    gclog_or_tty->print(" "SIZE_FORMAT"("SIZE_FORMAT")",
              gch->used(), gch->capacity());
  } else {
    gclog_or_tty->print(" "SIZE_FORMAT"K("SIZE_FORMAT"K)",
              gch->used() / K, gch->capacity() / K);
  }
}

size_t
ConcurrentMarkSweepGeneration::contiguous_available() const {
  // dld proposes an improvement in precision here. If the committed
  // part of the space ends in a free block we should add that to
  // uncommitted size in the calculation below. Will make this
  // change later, staying with the approximation below for the
  // time being. -- ysr.
  return MAX2(_virtual_space.uncommitted_size(), unsafe_max_alloc_nogc());
}

size_t
ConcurrentMarkSweepGeneration::unsafe_max_alloc_nogc() const {
  return _cmsSpace->max_alloc_in_words() * HeapWordSize;
}

size_t ConcurrentMarkSweepGeneration::max_available() const {
  return free() + _virtual_space.uncommitted_size();
}

bool ConcurrentMarkSweepGeneration::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
  size_t available = max_available();
  size_t av_promo  = (size_t)gc_stats()->avg_promoted()->padded_average();
  bool   res = (available >= av_promo) || (available >= max_promotion_in_bytes);
  if (Verbose && PrintGCDetails) {
    gclog_or_tty->print_cr(
      "CMS: promo attempt is%s safe: available("SIZE_FORMAT") %s av_promo("SIZE_FORMAT"),"
      "max_promo("SIZE_FORMAT")",
      res? "":" not", available, res? ">=":"<",
      av_promo, max_promotion_in_bytes);
  }
  return res;
}

// At a promotion failure dump information on block layout in heap
// (cms old generation).
void ConcurrentMarkSweepGeneration::promotion_failure_occurred() {
  if (CMSDumpAtPromotionFailure) {
    cmsSpace()->dump_at_safepoint_with_locks(collector(), gclog_or_tty);
  }
}

CompactibleSpace*
ConcurrentMarkSweepGeneration::first_compaction_space() const {
  return _cmsSpace;
}

void ConcurrentMarkSweepGeneration::reset_after_compaction() {
  // Clear the promotion information.  These pointers can be adjusted
  // along with all the other pointers into the heap but
  // compaction is expected to be a rare event with
  // a heap using cms so don't do it without seeing the need.
  if (CollectedHeap::use_parallel_gc_threads()) {
    for (uint i = 0; i < ParallelGCThreads; i++) {
      _par_gc_thread_states[i]->promo.reset();
    }
  }
}

void ConcurrentMarkSweepGeneration::space_iterate(SpaceClosure* blk, bool usedOnly) {
  blk->do_space(_cmsSpace);
}

void ConcurrentMarkSweepGeneration::compute_new_size() {
  assert_locked_or_safepoint(Heap_lock);

  // If incremental collection failed, we just want to expand
  // to the limit.
  if (incremental_collection_failed()) {
    clear_incremental_collection_failed();
    grow_to_reserved();
    return;
  }

  size_t expand_bytes = 0;
  double free_percentage = ((double) free()) / capacity();
  double desired_free_percentage = (double) MinHeapFreeRatio / 100;
  double maximum_free_percentage = (double) MaxHeapFreeRatio / 100;

  // compute expansion delta needed for reaching desired free percentage
  if (free_percentage < desired_free_percentage) {
    size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
    assert(desired_capacity >= capacity(), "invalid expansion size");
    expand_bytes = MAX2(desired_capacity - capacity(), MinHeapDeltaBytes);
  }
  if (expand_bytes > 0) {
    if (PrintGCDetails && Verbose) {
      size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
      gclog_or_tty->print_cr("\nFrom compute_new_size: ");
      gclog_or_tty->print_cr("  Free fraction %f", free_percentage);
      gclog_or_tty->print_cr("  Desired free fraction %f",
        desired_free_percentage);
      gclog_or_tty->print_cr("  Maximum free fraction %f",
        maximum_free_percentage);
      gclog_or_tty->print_cr("  Capacity "SIZE_FORMAT, capacity()/1000);
      gclog_or_tty->print_cr("  Desired capacity "SIZE_FORMAT,
        desired_capacity/1000);
      int prev_level = level() - 1;
      if (prev_level >= 0) {
        size_t prev_size = 0;
        GenCollectedHeap* gch = GenCollectedHeap::heap();
        Generation* prev_gen = gch->_gens[prev_level];
        prev_size = prev_gen->capacity();
        gclog_or_tty->print_cr("  Younger gen size "SIZE_FORMAT,
                               prev_size/1000);
      }
      gclog_or_tty->print_cr("  unsafe_max_alloc_nogc "SIZE_FORMAT,
        unsafe_max_alloc_nogc()/1000);
      gclog_or_tty->print_cr("  contiguous available "SIZE_FORMAT,
        contiguous_available()/1000);
      gclog_or_tty->print_cr("  Expand by "SIZE_FORMAT" (bytes)",
        expand_bytes);
    }
    // safe if expansion fails
    expand(expand_bytes, 0, CMSExpansionCause::_satisfy_free_ratio);
    if (PrintGCDetails && Verbose) {
      gclog_or_tty->print_cr("  Expanded free fraction %f",
        ((double) free()) / capacity());
    }
  }
}

Mutex* ConcurrentMarkSweepGeneration::freelistLock() const {
  return cmsSpace()->freelistLock();
}

HeapWord* ConcurrentMarkSweepGeneration::allocate(size_t size,
                                                  bool   tlab) {
  CMSSynchronousYieldRequest yr;
  MutexLockerEx x(freelistLock(),
                  Mutex::_no_safepoint_check_flag);
  return have_lock_and_allocate(size, tlab);
}

HeapWord* ConcurrentMarkSweepGeneration::have_lock_and_allocate(size_t size,
                                                  bool   tlab /* ignored */) {
  assert_lock_strong(freelistLock());
  size_t adjustedSize = CompactibleFreeListSpace::adjustObjectSize(size);
  HeapWord* res = cmsSpace()->allocate(adjustedSize);
  // Allocate the object live (grey) if the background collector has
  // started marking. This is necessary because the marker may
  // have passed this address and consequently this object will
  // not otherwise be greyed and would be incorrectly swept up.
  // Note that if this object contains references, the writing
  // of those references will dirty the card containing this object
  // allowing the object to be blackened (and its references scanned)
  // either during a preclean phase or at the final checkpoint.
  if (res != NULL) {
    // We may block here with an uninitialized object with
    // its mark-bit or P-bits not yet set. Such objects need
    // to be safely navigable by block_start().
    assert(oop(res)->klass_or_null() == NULL, "Object should be uninitialized here.");
    assert(!((FreeChunk*)res)->is_free(), "Error, block will look free but show wrong size");
    collector()->direct_allocated(res, adjustedSize);
    _direct_allocated_words += adjustedSize;
    // allocation counters
    NOT_PRODUCT(
      _numObjectsAllocated++;
      _numWordsAllocated += (int)adjustedSize;
    )
  }
  return res;
}

// In the case of direct allocation by mutators in a generation that
// is being concurrently collected, the object must be allocated
// live (grey) if the background collector has started marking.
// This is necessary because the marker may
// have passed this address and consequently this object will
// not otherwise be greyed and would be incorrectly swept up.
// Note that if this object contains references, the writing
// of those references will dirty the card containing this object
// allowing the object to be blackened (and its references scanned)
// either during a preclean phase or at the final checkpoint.
void CMSCollector::direct_allocated(HeapWord* start, size_t size) {
  assert(_markBitMap.covers(start, size), "Out of bounds");
  if (_collectorState >= Marking) {
    MutexLockerEx y(_markBitMap.lock(),
                    Mutex::_no_safepoint_check_flag);
    // [see comments preceding SweepClosure::do_blk() below for details]
    // 1. need to mark the object as live so it isn't collected
    // 2. need to mark the 2nd bit to indicate the object may be uninitialized
    // 3. need to mark the end of the object so marking, precleaning or sweeping
    //    can skip over uninitialized or unparsable objects. An allocated
    //    object is considered uninitialized for our purposes as long as
    //    its klass word is NULL. (Unparsable objects are those which are
    //    initialized in the sense just described, but whose sizes can still
    //    not be correctly determined. Note that the class of unparsable objects
    //    can only occur in the perm gen. All old gen objects are parsable
    //    as soon as they are initialized.)
    _markBitMap.mark(start);          // object is live
    _markBitMap.mark(start + 1);      // object is potentially uninitialized
    _markBitMap.mark(start + size - 1);
                                      // mark end of object
  }
  // check that oop looks uninitialized
  assert(oop(start)->klass_or_null() == NULL, "_klass should be NULL");
}
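
// Illustrative sketch of the three marks set above (not product code):
// for a directly allocated block of 5 heap words starting at "start",
// the mark bit map ends up as
//
//   offset:    start   start+1  start+2  start+3  start+4
//   mark bit:    1        1        0        0        1
//              (live)  (maybe uninitialized)      (end of object)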

void CMSCollector::promoted(bool par, HeapWord* start,
                            bool is_obj_array, size_t obj_size) {
  assert(_markBitMap.covers(start), "Out of bounds");
  // See comment in direct_allocated() about when objects should
  // be allocated live.
  if (_collectorState >= Marking) {
    // we already hold the marking bit map lock, taken in
    // the prologue
    if (par) {
      _markBitMap.par_mark(start);
    } else {
      _markBitMap.mark(start);
    }
    // We don't need to mark the object as uninitialized (as
    // in direct_allocated above) because this is being done with the
    // world stopped and the object will be initialized by the
    // time the marking, precleaning or sweeping get to look at it.
    // But see the code for copying objects into the CMS generation,
    // where we need to ensure that concurrent readers of the
    // block offset table are able to safely navigate a block that
    // is in flux from being free to being allocated (and in
    // transition while being copied into) and subsequently
    // becoming a bona-fide object when the copy/promotion is complete.
    assert(SafepointSynchronize::is_at_safepoint(),
           "expect promotion only at safepoints");

    if (_collectorState < Sweeping) {
      // Mark the appropriate cards in the modUnionTable, so that
      // this object gets scanned before the sweep. If this is
      // not done, CMS generation references in the object might
      // not get marked.
      // For the case of arrays, which are otherwise precisely
      // marked, we need to dirty the entire array, not just its head.
      if (is_obj_array) {
        // The [par_]mark_range() method expects mr.end() below to
        // be aligned to the granularity of a bit's representation
        // in the heap. In the case of the MUT below, that's a
        // card size.
        MemRegion mr(start,
                     (HeapWord*)round_to((intptr_t)(start + obj_size),
                        CardTableModRefBS::card_size /* bytes */));
        if (par) {
          _modUnionTable.par_mark_range(mr);
        } else {
          _modUnionTable.mark_range(mr);
        }
      } else {  // not an obj array; we can just mark the head
        if (par) {
          _modUnionTable.par_mark(start);
        } else {
          _modUnionTable.mark(start);
        }
      }
    }
  }
}

static inline size_t percent_of_space(Space* space, HeapWord* addr)
{
  size_t delta = pointer_delta(addr, space->bottom());
  return (size_t)(delta * 100.0 / (space->capacity() / HeapWordSize));
}

void CMSCollector::icms_update_allocation_limits()
{
  Generation* gen0 = GenCollectedHeap::heap()->get_gen(0);
  EdenSpace* eden = gen0->as_DefNewGeneration()->eden();

  const unsigned int duty_cycle = stats().icms_update_duty_cycle();
  if (CMSTraceIncrementalPacing) {
    stats().print();
  }

  assert(duty_cycle <= 100, "invalid duty cycle");
  if (duty_cycle != 0) {
    // The duty_cycle is a percentage between 0 and 100; convert to words and
    // then compute the offset from the endpoints of the space.
    size_t free_words = eden->free() / HeapWordSize;
    double free_words_dbl = (double)free_words;
    size_t duty_cycle_words = (size_t)(free_words_dbl * duty_cycle / 100.0);
    size_t offset_words = (free_words - duty_cycle_words) / 2;

    _icms_start_limit = eden->top() + offset_words;
    _icms_stop_limit = eden->end() - offset_words;

    // The limits may be adjusted (shifted to the right) by
    // CMSIncrementalOffset, to allow the application more mutator time after a
    // young gen gc (when all mutators were stopped) and before CMS starts and
    // takes away one or more cpus.
    if (CMSIncrementalOffset != 0) {
      double adjustment_dbl = free_words_dbl * CMSIncrementalOffset / 100.0;
      size_t adjustment = (size_t)adjustment_dbl;
      HeapWord* tmp_stop = _icms_stop_limit + adjustment;
      if (tmp_stop > _icms_stop_limit && tmp_stop < eden->end()) {
        _icms_start_limit += adjustment;
        _icms_stop_limit = tmp_stop;
      }
    }
  }
  if (duty_cycle == 0 || (_icms_start_limit == _icms_stop_limit)) {
    _icms_start_limit = _icms_stop_limit = eden->end();
  }

  // Install the new start limit.
  eden->set_soft_end(_icms_start_limit);

  if (CMSTraceIncrementalMode) {
    gclog_or_tty->print(" icms alloc limits:  "
                           PTR_FORMAT "," PTR_FORMAT
                           " (" SIZE_FORMAT "%%," SIZE_FORMAT "%%) ",
                           _icms_start_limit, _icms_stop_limit,
                           percent_of_space(eden, _icms_start_limit),
                           percent_of_space(eden, _icms_stop_limit));
    if (Verbose) {
      gclog_or_tty->print("eden:  ");
      eden->print_on(gclog_or_tty);
    }
  }
}
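
// Worked example for icms_update_allocation_limits() (illustrative
// numbers only): with eden->free() equal to 1000 heap words,
// duty_cycle = 20 and CMSIncrementalOffset = 0:
//
//   duty_cycle_words = 1000 * 20 / 100  = 200
//   offset_words     = (1000 - 200) / 2 = 400
//
// so _icms_start_limit = top() + 400 and _icms_stop_limit = end() - 400,
// centering a 200-word icms window within the free part of eden.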
1195 
1196 // Any changes here should try to maintain the invariant
1197 // that if this method is called with _icms_start_limit
1198 // and _icms_stop_limit both NULL, then it should return NULL
1199 // and not notify the icms thread.
1200 HeapWord*
1201 CMSCollector::allocation_limit_reached(Space* space, HeapWord* top,
1202                                        size_t word_size)
1203 {
1204   // A start_limit equal to end() means the duty cycle is 0, so treat that as a
1205   // nop.
1206   if (CMSIncrementalMode && _icms_start_limit != space->end()) {
1207     if (top <= _icms_start_limit) {
1208       if (CMSTraceIncrementalMode) {
1209         space->print_on(gclog_or_tty);
1210         gclog_or_tty->stamp();
1211         gclog_or_tty->print_cr(" start limit top=" PTR_FORMAT
1212                                ", new limit=" PTR_FORMAT
1213                                " (" SIZE_FORMAT "%%)",
1214                                top, _icms_stop_limit,
1215                                percent_of_space(space, _icms_stop_limit));
1216       }
1217       ConcurrentMarkSweepThread::start_icms();
1218       assert(top < _icms_stop_limit, "Tautology");
1219       if (word_size < pointer_delta(_icms_stop_limit, top)) {
1220         return _icms_stop_limit;
1221       }
1222 
1223       // The allocation will cross both the _start and _stop limits, so do the
1224       // stop notification also and return end().
1225       if (CMSTraceIncrementalMode) {
1226         space->print_on(gclog_or_tty);
1227         gclog_or_tty->stamp();
1228         gclog_or_tty->print_cr(" +stop limit top=" PTR_FORMAT
1229                                ", new limit=" PTR_FORMAT
1230                                " (" SIZE_FORMAT "%%)",
1231                                top, space->end(),
1232                                percent_of_space(space, space->end()));
1233       }
1234       ConcurrentMarkSweepThread::stop_icms();
1235       return space->end();
1236     }
1237 
1238     if (top <= _icms_stop_limit) {
1239       if (CMSTraceIncrementalMode) {
1240         space->print_on(gclog_or_tty);
1241         gclog_or_tty->stamp();
1242         gclog_or_tty->print_cr(" stop limit top=" PTR_FORMAT
1243                                ", new limit=" PTR_FORMAT
1244                                " (" SIZE_FORMAT "%%)",
1245                                top, space->end(),
1246                                percent_of_space(space, space->end()));
1247       }
1248       ConcurrentMarkSweepThread::stop_icms();
1249       return space->end();
1250     }
1251 
1252     if (CMSTraceIncrementalMode) {
1253       space->print_on(gclog_or_tty);
1254       gclog_or_tty->stamp();
1255       gclog_or_tty->print_cr(" end limit top=" PTR_FORMAT
1256                              ", new limit=" PTR_FORMAT,
1257                              top, NULL);
1258     }
1259   }
1260 
1261   return NULL;
1262 }
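
// For illustration only -- a minimal sketch of the three-band decision
// made above, using plain pointers instead of HotSpot types. The helpers
// notify_start()/notify_stop() are hypothetical stand-ins for
// ConcurrentMarkSweepThread::start_icms()/stop_icms(), and the sketch
// omits tracing and the duty-cycle-0 (start_limit == end) special case:
//
//   char* limit_reached(char* top, size_t bytes,
//                       char* start_limit, char* stop_limit, char* end) {
//     if (top <= start_limit) {      // about to cross the start limit
//       notify_start();              // kick off the incremental collector
//       if (bytes < (size_t)(stop_limit - top)) {
//         return stop_limit;         // allocation stays below the stop limit
//       }
//       notify_stop();               // crosses both limits in one allocation
//       return end;
//     }
//     if (top <= stop_limit) {       // crossing only the stop limit
//       notify_stop();
//       return end;
//     }
//     return NULL;                   // past all limits; no new soft end
//   }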
1263 
1264 oop ConcurrentMarkSweepGeneration::promote(oop obj, size_t obj_size) {
1265   assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
1266   // allocate, copy and if necessary update promoinfo --
1267   // delegate to underlying space.
1268   assert_lock_strong(freelistLock());
1269 
1270 #ifndef PRODUCT
1271   if (Universe::heap()->promotion_should_fail()) {
1272     return NULL;
1273   }
1274 #endif  // #ifndef PRODUCT
1275 
1276   oop res = _cmsSpace->promote(obj, obj_size);
1277   if (res == NULL) {
1278     // expand and retry
1279     size_t s = _cmsSpace->expansionSpaceRequired(obj_size);  // HeapWords
1280     expand(s*HeapWordSize, MinHeapDeltaBytes,
1281       CMSExpansionCause::_satisfy_promotion);
1282     // Since there's currently no next generation, we don't try to promote
1283     // into a more senior generation.
1284     assert(next_gen() == NULL, "assumption, based upon which no attempt "
1285                                "is made to pass on a possibly failing "
1286                                "promotion to next generation");
1287     res = _cmsSpace->promote(obj, obj_size);
1288   }
1289   if (res != NULL) {
1290     // See comment in allocate() about when objects should
1291     // be allocated live.
1292     assert(obj->is_oop(), "Will dereference klass pointer below");
1293     collector()->promoted(false,           // Not parallel
1294                           (HeapWord*)res, obj->is_objArray(), obj_size);
1295     // promotion counters
1296     NOT_PRODUCT(
1297       _numObjectsPromoted++;
1298       _numWordsPromoted +=
1299         (int)(CompactibleFreeListSpace::adjustObjectSize(obj->size()));
1300     )
1301   }
1302   return res;
1303 }
1304 
1305 
1306 HeapWord*
1307 ConcurrentMarkSweepGeneration::allocation_limit_reached(Space* space,
1308                                              HeapWord* top,
1309                                              size_t word_sz)
1310 {
1311   return collector()->allocation_limit_reached(space, top, word_sz);
1312 }
1313 
1314 // IMPORTANT: Notes on object size recognition in CMS.
1315 // ---------------------------------------------------
1316 // A block of storage in the CMS generation is always in
1317 // one of three states. A free block (FREE), an allocated
1318 // object (OBJECT) whose size() method reports the correct size,
1319 // and an intermediate state (TRANSIENT) in which its size cannot
1320 // be accurately determined.
1321 // STATE IDENTIFICATION:   (32 bit and 64 bit w/o COOPS)
1322 // -----------------------------------------------------
1323 // FREE:      klass_word & 1 == 1; mark_word holds block size
1324 //
1325 // OBJECT:    klass_word installed; klass_word != 0 && klass_word & 1 == 0;
1326 //            obj->size() computes correct size
//            [Perm Gen objects need to be "parsable" before they can be navigated]
1328 //
1329 // TRANSIENT: klass_word == 0; size is indeterminate until we become an OBJECT
1330 //
1331 // STATE IDENTIFICATION: (64 bit+COOPS)
1332 // ------------------------------------
1333 // FREE:      mark_word & CMS_FREE_BIT == 1; mark_word & ~CMS_FREE_BIT gives block_size
1334 //
1335 // OBJECT:    klass_word installed; klass_word != 0;
1336 //            obj->size() computes correct size
1337 //            [Perm Gen comment above continues to hold]
1338 //
1339 // TRANSIENT: klass_word == 0; size is indeterminate until we become an OBJECT
1340 //
1341 //
1342 // STATE TRANSITION DIAGRAM
1343 //
1344 //        mut / parnew                     mut  /  parnew
1345 // FREE --------------------> TRANSIENT ---------------------> OBJECT --|
1346 //  ^                                                                   |
1347 //  |------------------------ DEAD <------------------------------------|
1348 //         sweep                            mut
1349 //
1350 // While a block is in TRANSIENT state its size cannot be determined
1351 // so readers will either need to come back later or stall until
1352 // the size can be determined. Note that for the case of direct
1353 // allocation, P-bits, when available, may be used to determine the
1354 // size of an object that may not yet have been initialized.
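//
// For illustration only -- a sketch of how a reader could classify a
// block under the (32-bit / 64-bit w/o COOPS) encoding above. The helper
// klass_word_of() is a hypothetical stand-in for the real accessors in
// oopDesc/FreeChunk:
//
//   enum BlockState { FREE, OBJECT, TRANSIENT };
//
//   BlockState classify(HeapWord* addr) {
//     intptr_t kw = klass_word_of(addr);
//     if (kw & 1)  return FREE;      // low bit set: free chunk; the mark
//                                    //   word holds the block size
//     if (kw != 0) return OBJECT;    // klass installed: size() is reliable
//     return TRANSIENT;              // klass not yet installed: size is
//                                    //   indeterminate; retry or stall
//   }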
1355 
1356 // Things to support parallel young-gen collection.
1357 oop
1358 ConcurrentMarkSweepGeneration::par_promote(int thread_num,
1359                                            oop old, markOop m,
1360                                            size_t word_sz) {
1361 #ifndef PRODUCT
1362   if (Universe::heap()->promotion_should_fail()) {
1363     return NULL;
1364   }
1365 #endif  // #ifndef PRODUCT
1366 
1367   CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
1368   PromotionInfo* promoInfo = &ps->promo;
  // If we are tracking promotions, first ensure space for the
  // promotion (including spooling space for saving the header if necessary),
  // then allocate and copy, and finally track the promoted info if needed.
1372   // When tracking (see PromotionInfo::track()), the mark word may
1373   // be displaced and in this case restoration of the mark word
1374   // occurs in the (oop_since_save_marks_)iterate phase.
1375   if (promoInfo->tracking() && !promoInfo->ensure_spooling_space()) {
1376     // Out of space for allocating spooling buffers;
1377     // try expanding and allocating spooling buffers.
1378     if (!expand_and_ensure_spooling_space(promoInfo)) {
1379       return NULL;
1380     }
1381   }
1382   assert(promoInfo->has_spooling_space(), "Control point invariant");
1383   const size_t alloc_sz = CompactibleFreeListSpace::adjustObjectSize(word_sz);
1384   HeapWord* obj_ptr = ps->lab.alloc(alloc_sz);
1385   if (obj_ptr == NULL) {
1386      obj_ptr = expand_and_par_lab_allocate(ps, alloc_sz);
1387      if (obj_ptr == NULL) {
1388        return NULL;
1389      }
1390   }
1391   oop obj = oop(obj_ptr);
1392   OrderAccess::storestore();
1393   assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
1394   assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
1395   // IMPORTANT: See note on object initialization for CMS above.
1396   // Otherwise, copy the object.  Here we must be careful to insert the
1397   // klass pointer last, since this marks the block as an allocated object.
1398   // Except with compressed oops it's the mark word.
1399   HeapWord* old_ptr = (HeapWord*)old;
1400   // Restore the mark word copied above.
1401   obj->set_mark(m);
1402   assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
1403   assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
1404   OrderAccess::storestore();
1405 
1406   if (UseCompressedOops) {
1407     // Copy gap missed by (aligned) header size calculation below
1408     obj->set_klass_gap(old->klass_gap());
1409   }
1410   if (word_sz > (size_t)oopDesc::header_size()) {
1411     Copy::aligned_disjoint_words(old_ptr + oopDesc::header_size(),
1412                                  obj_ptr + oopDesc::header_size(),
1413                                  word_sz - oopDesc::header_size());
1414   }
1415 
1416   // Now we can track the promoted object, if necessary.  We take care
1417   // to delay the transition from uninitialized to full object
1418   // (i.e., insertion of klass pointer) until after, so that it
1419   // atomically becomes a promoted object.
1420   if (promoInfo->tracking()) {
1421     promoInfo->track((PromotedObject*)obj, old->klass());
1422   }
1423   assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
1424   assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
1425   assert(old->is_oop(), "Will use and dereference old klass ptr below");
1426 
1427   // Finally, install the klass pointer (this should be volatile).
1428   OrderAccess::storestore();
1429   obj->set_klass(old->klass());
1430   // We should now be able to calculate the right size for this object
1431   assert(obj->is_oop() && obj->size() == (int)word_sz, "Error, incorrect size computed for promoted object");
1432 
1433   collector()->promoted(true,          // parallel
1434                         obj_ptr, old->is_objArray(), word_sz);
1435 
1436   NOT_PRODUCT(
1437     Atomic::inc_ptr(&_numObjectsPromoted);
1438     Atomic::add_ptr(alloc_sz, &_numWordsPromoted);
1439   )
1440 
1441   return obj;
1442 }
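
// For illustration only -- the ordering discipline par_promote() relies
// on, reduced to its essentials. Every store that initializes the copy
// must be ordered (via OrderAccess::storestore()) before the single store
// that publishes it: installing the klass word, which moves the block from
// TRANSIENT to OBJECT in the state machine documented above. copy_body()
// is a hypothetical stand-in for the mark/gap/field copying done above:
//
//   copy_body(old, obj);             // 1. mark word, klass gap, fields
//   OrderAccess::storestore();       // 2. make the body visible first
//   obj->set_klass(old->klass());    // 3. publish: block is now an OBJECT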
1443 
1444 void
1445 ConcurrentMarkSweepGeneration::
1446 par_promote_alloc_undo(int thread_num,
1447                        HeapWord* obj, size_t word_sz) {
1448   // CMS does not support promotion undo.
1449   ShouldNotReachHere();
1450 }
1451 
1452 void
1453 ConcurrentMarkSweepGeneration::
1454 par_promote_alloc_done(int thread_num) {
1455   CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
1456   ps->lab.retire(thread_num);
1457 }
1458 
1459 void
1460 ConcurrentMarkSweepGeneration::
1461 par_oop_since_save_marks_iterate_done(int thread_num) {
1462   CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
1463   ParScanWithoutBarrierClosure* dummy_cl = NULL;
1464   ps->promo.promoted_oops_iterate_nv(dummy_cl);
1465 }
1466 
1467 // XXXPERM
1468 bool ConcurrentMarkSweepGeneration::should_collect(bool   full,
1469                                                    size_t size,
1470                                                    bool   tlab)
1471 {
1472   // We allow a STW collection only if a full
1473   // collection was requested.
1474   return full || should_allocate(size, tlab); // FIX ME !!!
1475   // This and promotion failure handling are connected at the
1476   // hip and should be fixed by untying them.
1477 }
1478 
1479 bool CMSCollector::shouldConcurrentCollect() {
1480   if (_full_gc_requested) {
1481     if (Verbose && PrintGCDetails) {
      gclog_or_tty->print_cr("CMSCollector: collect because of explicit "
                             "gc request (or gc_locker)");
1484     }
1485     return true;
1486   }
1487 
1488   // For debugging purposes, change the type of collection.
1489   // If the rotation is not on the concurrent collection
1490   // type, don't start a concurrent collection.
1491   NOT_PRODUCT(
1492     if (RotateCMSCollectionTypes &&
1493         (_cmsGen->debug_collection_type() !=
1494           ConcurrentMarkSweepGeneration::Concurrent_collection_type)) {
1495       assert(_cmsGen->debug_collection_type() !=
1496         ConcurrentMarkSweepGeneration::Unknown_collection_type,
1497         "Bad cms collection type");
1498       return false;
1499     }
1500   )
1501 
1502   FreelistLocker x(this);
1503   // ------------------------------------------------------------------
1504   // Print out lots of information which affects the initiation of
1505   // a collection.
1506   if (PrintCMSInitiationStatistics && stats().valid()) {
1507     gclog_or_tty->print("CMSCollector shouldConcurrentCollect: ");
1508     gclog_or_tty->stamp();
1509     gclog_or_tty->print_cr("");
1510     stats().print_on(gclog_or_tty);
1511     gclog_or_tty->print_cr("time_until_cms_gen_full %3.7f",
1512       stats().time_until_cms_gen_full());
1513     gclog_or_tty->print_cr("free="SIZE_FORMAT, _cmsGen->free());
1514     gclog_or_tty->print_cr("contiguous_available="SIZE_FORMAT,
1515                            _cmsGen->contiguous_available());
1516     gclog_or_tty->print_cr("promotion_rate=%g", stats().promotion_rate());
1517     gclog_or_tty->print_cr("cms_allocation_rate=%g", stats().cms_allocation_rate());
1518     gclog_or_tty->print_cr("occupancy=%3.7f", _cmsGen->occupancy());
1519     gclog_or_tty->print_cr("initiatingOccupancy=%3.7f", _cmsGen->initiating_occupancy());
1520     gclog_or_tty->print_cr("initiatingPermOccupancy=%3.7f", _permGen->initiating_occupancy());
1521   }
1522   // ------------------------------------------------------------------
1523 
1524   // If the estimated time to complete a cms collection (cms_duration())
1525   // is less than the estimated time remaining until the cms generation
1526   // is full, start a collection.
1527   if (!UseCMSInitiatingOccupancyOnly) {
1528     if (stats().valid()) {
1529       if (stats().time_until_cms_start() == 0.0) {
1530         return true;
1531       }
1532     } else {
1533       // We want to conservatively collect somewhat early in order
1534       // to try and "bootstrap" our CMS/promotion statistics;
1535       // this branch will not fire after the first successful CMS
1536       // collection because the stats should then be valid.
1537       if (_cmsGen->occupancy() >= _bootstrap_occupancy) {
1538         if (Verbose && PrintGCDetails) {
1539           gclog_or_tty->print_cr(
1540             " CMSCollector: collect for bootstrapping statistics:"
1541             " occupancy = %f, boot occupancy = %f", _cmsGen->occupancy(),
1542             _bootstrap_occupancy);
1543         }
1544         return true;
1545       }
1546     }
1547   }
1548 
  // Otherwise, we start a collection cycle if either the perm gen or
  // the old gen wants a collection cycle started. Each may use
  // an appropriate criterion for making this decision.
1552   // XXX We need to make sure that the gen expansion
1553   // criterion dovetails well with this. XXX NEED TO FIX THIS
1554   if (_cmsGen->should_concurrent_collect()) {
1555     if (Verbose && PrintGCDetails) {
1556       gclog_or_tty->print_cr("CMS old gen initiated");
1557     }
1558     return true;
1559   }
1560 
1561   // We start a collection if we believe an incremental collection may fail;
1562   // this is not likely to be productive in practice because it's probably too
1563   // late anyway.
1564   GenCollectedHeap* gch = GenCollectedHeap::heap();
1565   assert(gch->collector_policy()->is_two_generation_policy(),
1566          "You may want to check the correctness of the following");
1567   if (gch->incremental_collection_will_fail(true /* consult_young */)) {
1568     if (Verbose && PrintGCDetails) {
1569       gclog_or_tty->print("CMSCollector: collect because incremental collection will fail ");
1570     }
1571     return true;
1572   }
1573 
1574   if (CMSClassUnloadingEnabled && _permGen->should_concurrent_collect()) {
1575     bool res = update_should_unload_classes();
1576     if (res) {
1577       if (Verbose && PrintGCDetails) {
1578         gclog_or_tty->print_cr("CMS perm gen initiated");
1579       }
1580       return true;
1581     }
1582   }
1583   return false;
1584 }
1585 
1586 // Clear _expansion_cause fields of constituent generations
1587 void CMSCollector::clear_expansion_cause() {
1588   _cmsGen->clear_expansion_cause();
1589   _permGen->clear_expansion_cause();
1590 }
1591 
// We should be conservative in starting a collection cycle.  Starting
// too eagerly runs the risk of collecting far too often in the
// extreme; collecting too rarely falls back on full collections,
// which work, even if not optimal in terms of concurrent work.
// As a workaround for collecting too eagerly, use the flag
// UseCMSInitiatingOccupancyOnly.  This also has the advantage of
// giving the user an easily understandable way of controlling the
// collections.
1600 // We want to start a new collection cycle if any of the following
1601 // conditions hold:
1602 // . our current occupancy exceeds the configured initiating occupancy
1603 //   for this generation, or
1604 // . we recently needed to expand this space and have not, since that
1605 //   expansion, done a collection of this generation, or
1606 // . the underlying space believes that it may be a good idea to initiate
1607 //   a concurrent collection (this may be based on criteria such as the
1608 //   following: the space uses linear allocation and linear allocation is
1609 //   going to fail, or there is believed to be excessive fragmentation in
1610 //   the generation, etc... or ...
1611 // [.(currently done by CMSCollector::shouldConcurrentCollect() only for
1612 //   the case of the old generation, not the perm generation; see CR 6543076):
1613 //   we may be approaching a point at which allocation requests may fail because
1614 //   we will be out of sufficient free space given allocation rate estimates.]
1615 bool ConcurrentMarkSweepGeneration::should_concurrent_collect() const {
1616 
1617   assert_lock_strong(freelistLock());
1618   if (occupancy() > initiating_occupancy()) {
1619     if (PrintGCDetails && Verbose) {
1620       gclog_or_tty->print(" %s: collect because of occupancy %f / %f  ",
1621         short_name(), occupancy(), initiating_occupancy());
1622     }
1623     return true;
1624   }
1625   if (UseCMSInitiatingOccupancyOnly) {
1626     return false;
1627   }
1628   if (expansion_cause() == CMSExpansionCause::_satisfy_allocation) {
1629     if (PrintGCDetails && Verbose) {
1630       gclog_or_tty->print(" %s: collect because expanded for allocation ",
1631         short_name());
1632     }
1633     return true;
1634   }
1635   if (_cmsSpace->should_concurrent_collect()) {
1636     if (PrintGCDetails && Verbose) {
1637       gclog_or_tty->print(" %s: collect because cmsSpace says so ",
1638         short_name());
1639     }
1640     return true;
1641   }
1642   return false;
1643 }
1644 
1645 void ConcurrentMarkSweepGeneration::collect(bool   full,
1646                                             bool   clear_all_soft_refs,
1647                                             size_t size,
1648                                             bool   tlab)
1649 {
1650   collector()->collect(full, clear_all_soft_refs, size, tlab);
1651 }
1652 
1653 void CMSCollector::collect(bool   full,
1654                            bool   clear_all_soft_refs,
1655                            size_t size,
1656                            bool   tlab)
1657 {
1658   if (!UseCMSCollectionPassing && _collectorState > Idling) {
1659     // For debugging purposes skip the collection if the state
1660     // is not currently idle
1661     if (TraceCMSState) {
1662       gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " skipped full:%d CMS state %d",
1663         Thread::current(), full, _collectorState);
1664     }
1665     return;
1666   }
1667 
1668   // The following "if" branch is present for defensive reasons.
1669   // In the current uses of this interface, it can be replaced with:
1670   // assert(!GC_locker.is_active(), "Can't be called otherwise");
1671   // But I am not placing that assert here to allow future
1672   // generality in invoking this interface.
1673   if (GC_locker::is_active()) {
1674     // A consistency test for GC_locker
1675     assert(GC_locker::needs_gc(), "Should have been set already");
1676     // Skip this foreground collection, instead
1677     // expanding the heap if necessary.
1678     // Need the free list locks for the call to free() in compute_new_size()
1679     compute_new_size();
1680     return;
1681   }
1682   acquire_control_and_collect(full, clear_all_soft_refs);
  _full_gcs_since_conc_gc++;
}
1686 
1687 void CMSCollector::request_full_gc(unsigned int full_gc_count, GCCause::Cause cause) {
1688   GenCollectedHeap* gch = GenCollectedHeap::heap();
1689   unsigned int gc_count = gch->total_full_collections();
1690   if (gc_count == full_gc_count) {
1691     MutexLockerEx y(CGC_lock, Mutex::_no_safepoint_check_flag);
1692     _full_gc_requested = true;
1693     _full_gc_cause = cause;
1694     CGC_lock->notify();   // nudge CMS thread
1695   } else {
1696     assert(gc_count > full_gc_count, "Error: causal loop");
1697   }
1698 }
1699 
1700 bool CMSCollector::is_external_interruption() {
1701   GCCause::Cause cause = GenCollectedHeap::heap()->gc_cause();
1702   return GCCause::is_user_requested_gc(cause) ||
1703          GCCause::is_serviceability_requested_gc(cause);
1704 }
1705 
1706 void CMSCollector::report_concurrent_mode_interruption() {
1707   if (is_external_interruption()) {
1708     if (PrintGCDetails) {
1709       gclog_or_tty->print(" (concurrent mode interrupted)");
1710     }
1711   } else {
1712     if (PrintGCDetails) {
1713       gclog_or_tty->print(" (concurrent mode failure)");
1714     }
1715     _gc_tracer_cm->report_concurrent_mode_failure();
1716   }
1717 }
1718 
// The foreground and background collectors need to coordinate in order
// to make sure that they do not mutually interfere with CMS collections.
// When a background collection is active, the foreground collector
// may need to take over (preempt) and synchronously complete an
// ongoing collection. Depending on the frequency of the background
// collections and the heap usage of the application, this preemption
// can be rare or frequent. There are only certain points in the
// background collection at which the "collection baton" can be
// passed to the foreground collector.
1729 //
1730 // The foreground collector will wait for the baton before
1731 // starting any part of the collection.  The foreground collector
1732 // will only wait at one location.
1733 //
1734 // The background collector will yield the baton before starting a new
1735 // phase of the collection (e.g., before initial marking, marking from roots,
1736 // precleaning, final re-mark, sweep etc.)  This is normally done at the head
1737 // of the loop which switches the phases. The background collector does some
1738 // of the phases (initial mark, final re-mark) with the world stopped.
1739 // Because of locking involved in stopping the world,
1740 // the foreground collector should not block waiting for the background
1741 // collector when it is doing a stop-the-world phase.  The background
1742 // collector will yield the baton at an additional point just before
1743 // it enters a stop-the-world phase.  Once the world is stopped, the
1744 // background collector checks the phase of the collection.  If the
1745 // phase has not changed, it proceeds with the collection.  If the
1746 // phase has changed, it skips that phase of the collection.  See
1747 // the comments on the use of the Heap_lock in collect_in_background().
1748 //
1749 // Variable used in baton passing.
1750 //   _foregroundGCIsActive - Set to true by the foreground collector when
1751 //      it wants the baton.  The foreground clears it when it has finished
1752 //      the collection.
//   _foregroundGCShouldWait - Set to true by the background collector
//      when it is running.  The foreground collector waits while
//      _foregroundGCShouldWait is true.
1756 //  CGC_lock - monitor used to protect access to the above variables
1757 //      and to notify the foreground and background collectors.
1758 //  _collectorState - current state of the CMS collection.
1759 //
1760 // The foreground collector
1761 //   acquires the CGC_lock
1762 //   sets _foregroundGCIsActive
1763 //   waits on the CGC_lock for _foregroundGCShouldWait to be false
1764 //     various locks acquired in preparation for the collection
1765 //     are released so as not to block the background collector
1766 //     that is in the midst of a collection
1767 //   proceeds with the collection
1768 //   clears _foregroundGCIsActive
1769 //   returns
1770 //
1771 // The background collector in a loop iterating on the phases of the
1772 //      collection
1773 //   acquires the CGC_lock
1774 //   sets _foregroundGCShouldWait
1775 //   if _foregroundGCIsActive is set
1776 //     clears _foregroundGCShouldWait, notifies _CGC_lock
1777 //     waits on _CGC_lock for _foregroundGCIsActive to become false
1778 //     and exits the loop.
1779 //   otherwise
1780 //     proceed with that phase of the collection
1781 //     if the phase is a stop-the-world phase,
1782 //       yield the baton once more just before enqueueing
1783 //       the stop-world CMS operation (executed by the VM thread).
1784 //   returns after all phases of the collection are done
1785 //
1786 
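// For illustration only -- the essence of the handshake above as a
// minimal monitor sketch (the real protocol below also juggles the CMS
// token, the Heap_lock and the free list locks):
//
//   // Foreground (VM thread, at a safepoint):
//   _foregroundGCIsActive = true;
//   {
//     MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
//     CGC_lock->notify();                  // wake a blocked CMS thread
//     while (_foregroundGCShouldWait) {
//       CGC_lock->wait(Mutex::_no_safepoint_check_flag);
//     }
//   }
//   ... do the foreground collection ...
//   _foregroundGCIsActive = false;
//
//   // Background (CMS thread, at each phase boundary):
//   {
//     MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
//     if (_foregroundGCIsActive) {         // yield the baton:
//       _foregroundGCShouldWait = false;
//       CGC_lock->notify();
//       ... wait for _foregroundGCIsActive to clear, then bail out ...
//     }
//   }
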
1787 void CMSCollector::acquire_control_and_collect(bool full,
1788         bool clear_all_soft_refs) {
1789   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
1790   assert(!Thread::current()->is_ConcurrentGC_thread(),
1791          "shouldn't try to acquire control from self!");
1792 
1793   // Start the protocol for acquiring control of the
1794   // collection from the background collector (aka CMS thread).
1795   assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
1796          "VM thread should have CMS token");
1797   // Remember the possibly interrupted state of an ongoing
1798   // concurrent collection
1799   CollectorState first_state = _collectorState;
1800 
1801   // Signal to a possibly ongoing concurrent collection that
1802   // we want to do a foreground collection.
1803   _foregroundGCIsActive = true;
1804 
1805   // Disable incremental mode during a foreground collection.
1806   ICMSDisabler icms_disabler;
1807 
  // Release locks and wait for a notify from the background collector;
  // releasing the locks is only necessary for phases which
  // do yields to improve the granularity of the collection.
1811   assert_lock_strong(bitMapLock());
1812   // We need to lock the Free list lock for the space that we are
1813   // currently collecting.
1814   assert(haveFreelistLocks(), "Must be holding free list locks");
1815   bitMapLock()->unlock();
1816   releaseFreelistLocks();
1817   {
1818     MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
1819     if (_foregroundGCShouldWait) {
1820       // We are going to be waiting for action for the CMS thread;
1821       // it had better not be gone (for instance at shutdown)!
1822       assert(ConcurrentMarkSweepThread::cmst() != NULL,
1823              "CMS thread must be running");
1824       // Wait here until the background collector gives us the go-ahead
1825       ConcurrentMarkSweepThread::clear_CMS_flag(
1826         ConcurrentMarkSweepThread::CMS_vm_has_token);  // release token
1827       // Get a possibly blocked CMS thread going:
1828       //   Note that we set _foregroundGCIsActive true above,
1829       //   without protection of the CGC_lock.
1830       CGC_lock->notify();
1831       assert(!ConcurrentMarkSweepThread::vm_thread_wants_cms_token(),
1832              "Possible deadlock");
1833       while (_foregroundGCShouldWait) {
1834         // wait for notification
1835         CGC_lock->wait(Mutex::_no_safepoint_check_flag);
        // Possibility of delay/starvation here, since the CMS token does
        // not know to give priority to the VM thread? Actually, I think
        // there wouldn't be any delay/starvation, but the proof of
        // that "fact" (?) appears non-trivial. XXX 20011219YSR
1840       }
1841       ConcurrentMarkSweepThread::set_CMS_flag(
1842         ConcurrentMarkSweepThread::CMS_vm_has_token);
1843     }
1844   }
1845   // The CMS_token is already held.  Get back the other locks.
1846   assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
1847          "VM thread should have CMS token");
1848   getFreelistLocks();
1849   bitMapLock()->lock_without_safepoint_check();
1850   if (TraceCMSState) {
1851     gclog_or_tty->print_cr("CMS foreground collector has asked for control "
1852       INTPTR_FORMAT " with first state %d", Thread::current(), first_state);
1853     gclog_or_tty->print_cr("    gets control with state %d", _collectorState);
1854   }
1855 
1856   // Check if we need to do a compaction, or if not, whether
1857   // we need to start the mark-sweep from scratch.
1858   bool should_compact    = false;
1859   bool should_start_over = false;
1860   decide_foreground_collection_type(clear_all_soft_refs,
1861     &should_compact, &should_start_over);
1862 
1863 NOT_PRODUCT(
1864   if (RotateCMSCollectionTypes) {
1865     if (_cmsGen->debug_collection_type() ==
1866         ConcurrentMarkSweepGeneration::MSC_foreground_collection_type) {
1867       should_compact = true;
1868     } else if (_cmsGen->debug_collection_type() ==
1869                ConcurrentMarkSweepGeneration::MS_foreground_collection_type) {
1870       should_compact = false;
1871     }
1872   }
1873 )
1874 
1875   if (first_state > Idling) {
1876     report_concurrent_mode_interruption();
1877   }
1878 
1879   if (should_compact) {
1880     // If the collection is being acquired from the background
1881     // collector, there may be references on the discovered
1882     // references lists that have NULL referents (being those
1883     // that were concurrently cleared by a mutator) or
1884     // that are no longer active (having been enqueued concurrently
1885     // by the mutator).
1886     // Scrub the list of those references because Mark-Sweep-Compact
1887     // code assumes referents are not NULL and that all discovered
1888     // Reference objects are active.
1889     ref_processor()->clean_up_discovered_references();
1890 
1891     do_compaction_work(clear_all_soft_refs);
1892 
1893     // Has the GC time limit been exceeded?
1894     DefNewGeneration* young_gen = _young_gen->as_DefNewGeneration();
1895     size_t max_eden_size = young_gen->max_capacity() -
1896                            young_gen->to()->capacity() -
1897                            young_gen->from()->capacity();
1898     GenCollectedHeap* gch = GenCollectedHeap::heap();
1899     GCCause::Cause gc_cause = gch->gc_cause();
1900     size_policy()->check_gc_overhead_limit(_young_gen->used(),
1901                                            young_gen->eden()->used(),
1902                                            _cmsGen->max_capacity(),
1903                                            max_eden_size,
1904                                            full,
1905                                            gc_cause,
1906                                            gch->collector_policy());
1907   } else {
1908     do_mark_sweep_work(clear_all_soft_refs, first_state,
1909       should_start_over);
1910   }
1911   // Reset the expansion cause, now that we just completed
1912   // a collection cycle.
1913   clear_expansion_cause();
1914   _foregroundGCIsActive = false;
1915   return;
1916 }
1917 
1918 // Resize the perm generation and the tenured generation
1919 // after obtaining the free list locks for the
1920 // two generations.
1921 void CMSCollector::compute_new_size() {
1922   assert_locked_or_safepoint(Heap_lock);
1923   FreelistLocker z(this);
1924   _permGen->compute_new_size();
1925   _cmsGen->compute_new_size();
1926 }
1927 
1928 // A work method used by foreground collection to determine
1929 // what type of collection (compacting or not, continuing or fresh)
1930 // it should do.
1931 // NOTE: the intent is to make UseCMSCompactAtFullCollection
1932 // and CMSCompactWhenClearAllSoftRefs the default in the future
1933 // and do away with the flags after a suitable period.
1934 void CMSCollector::decide_foreground_collection_type(
1935   bool clear_all_soft_refs, bool* should_compact,
1936   bool* should_start_over) {
1937   // Normally, we'll compact only if the UseCMSCompactAtFullCollection
1938   // flag is set, and we have either requested a System.gc() or
1939   // the number of full gc's since the last concurrent cycle
1940   // has exceeded the threshold set by CMSFullGCsBeforeCompaction,
1941   // or if an incremental collection has failed
1942   GenCollectedHeap* gch = GenCollectedHeap::heap();
1943   assert(gch->collector_policy()->is_two_generation_policy(),
1944          "You may want to check the correctness of the following");
1945   // Inform cms gen if this was due to partial collection failing.
1946   // The CMS gen may use this fact to determine its expansion policy.
1947   if (gch->incremental_collection_will_fail(false /* don't consult_young */)) {
1948     assert(!_cmsGen->incremental_collection_failed(),
1949            "Should have been noticed, reacted to and cleared");
1950     _cmsGen->set_incremental_collection_failed();
1951   }
1952   *should_compact =
1953     UseCMSCompactAtFullCollection &&
1954     ((_full_gcs_since_conc_gc >= CMSFullGCsBeforeCompaction) ||
1955      GCCause::is_user_requested_gc(gch->gc_cause()) ||
1956      gch->incremental_collection_will_fail(true /* consult_young */));
1957   *should_start_over = false;
1958   if (clear_all_soft_refs && !*should_compact) {
1959     // We are about to do a last ditch collection attempt
1960     // so it would normally make sense to do a compaction
1961     // to reclaim as much space as possible.
1962     if (CMSCompactWhenClearAllSoftRefs) {
1963       // Default: The rationale is that in this case either
1964       // we are past the final marking phase, in which case
1965       // we'd have to start over, or so little has been done
1966       // that there's little point in saving that work. Compaction
1967       // appears to be the sensible choice in either case.
1968       *should_compact = true;
1969     } else {
1970       // We have been asked to clear all soft refs, but not to
1971       // compact. Make sure that we aren't past the final checkpoint
1972       // phase, for that is where we process soft refs. If we are already
1973       // past that phase, we'll need to redo the refs discovery phase and
1974       // if necessary clear soft refs that weren't previously
1975       // cleared. We do so by remembering the phase in which
1976       // we came in, and if we are past the refs processing
1977       // phase, we'll choose to just redo the mark-sweep
1978       // collection from scratch.
1979       if (_collectorState > FinalMarking) {
1980         // We are past the refs processing phase;
1981         // start over and do a fresh synchronous CMS cycle
1982         _collectorState = Resetting; // skip to reset to start new cycle
1983         reset(false /* == !asynch */);
1984         *should_start_over = true;
1985       } // else we can continue a possibly ongoing current cycle
1986     }
1987   }
1988 }
1989 
1990 // A work method used by the foreground collector to do
1991 // a mark-sweep-compact.
1992 void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
1993   GenCollectedHeap* gch = GenCollectedHeap::heap();
1994 
1995   STWGCTimer* gc_timer = GenMarkSweep::gc_timer();
1996   gc_timer->register_gc_start(os::elapsed_counter());
1997 
1998   SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer();
1999   gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start());
2000 
2001   GCTraceTime t("CMS:MSC ", PrintGCDetails && Verbose, true, NULL);
2002   if (PrintGC && Verbose && !(GCCause::is_user_requested_gc(gch->gc_cause()))) {
2003     gclog_or_tty->print_cr("Compact ConcurrentMarkSweepGeneration after %d "
2004       "collections passed to foreground collector", _full_gcs_since_conc_gc);
2005   }
2006 
2007   // Sample collection interval time and reset for collection pause.
2008   if (UseAdaptiveSizePolicy) {
2009     size_policy()->msc_collection_begin();
2010   }
2011 
2012   // Temporarily widen the span of the weak reference processing to
2013   // the entire heap.
2014   MemRegion new_span(GenCollectedHeap::heap()->reserved_region());
2015   ReferenceProcessorSpanMutator rp_mut_span(ref_processor(), new_span);
2016   // Temporarily, clear the "is_alive_non_header" field of the
2017   // reference processor.
2018   ReferenceProcessorIsAliveMutator rp_mut_closure(ref_processor(), NULL);
2019   // Temporarily make reference _processing_ single threaded (non-MT).
2020   ReferenceProcessorMTProcMutator rp_mut_mt_processing(ref_processor(), false);
2021   // Temporarily make refs discovery atomic
2022   ReferenceProcessorAtomicMutator rp_mut_atomic(ref_processor(), true);
2023   // Temporarily make reference _discovery_ single threaded (non-MT)
2024   ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false);
2025 
2026   ref_processor()->set_enqueuing_is_done(false);
2027   ref_processor()->enable_discovery(false /*verify_disabled*/, false /*check_no_refs*/);
2028   ref_processor()->setup_policy(clear_all_soft_refs);
2029   // If an asynchronous collection finishes, the _modUnionTable is
  // all clear.  If we are taking over the collection from an asynchronous
  // collection, clear the _modUnionTable.
2032   assert(_collectorState != Idling || _modUnionTable.isAllClear(),
2033     "_modUnionTable should be clear if the baton was not passed");
2034   _modUnionTable.clear_all();
2035 
2036   // We must adjust the allocation statistics being maintained
2037   // in the free list space. We do so by reading and clearing
2038   // the sweep timer and updating the block flux rate estimates below.
2039   assert(!_intra_sweep_timer.is_active(), "_intra_sweep_timer should be inactive");
2040   if (_inter_sweep_timer.is_active()) {
2041     _inter_sweep_timer.stop();
2042     // Note that we do not use this sample to update the _inter_sweep_estimate.
2043     _cmsGen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()),
2044                                             _inter_sweep_estimate.padded_average(),
2045                                             _intra_sweep_estimate.padded_average());
2046   }
2047 
2048   GenMarkSweep::invoke_at_safepoint(_cmsGen->level(),
2049     ref_processor(), clear_all_soft_refs);
2050   #ifdef ASSERT
2051     CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();
2052     size_t free_size = cms_space->free();
2053     assert(free_size ==
2054            pointer_delta(cms_space->end(), cms_space->compaction_top())
2055            * HeapWordSize,
2056       "All the free space should be compacted into one chunk at top");
2057     assert(cms_space->dictionary()->total_chunk_size(
2058                                       debug_only(cms_space->freelistLock())) == 0 ||
2059            cms_space->totalSizeInIndexedFreeLists() == 0,
2060       "All the free space should be in a single chunk");
2061     size_t num = cms_space->totalCount();
2062     assert((free_size == 0 && num == 0) ||
2063            (free_size > 0  && (num == 1 || num == 2)),
2064          "There should be at most 2 free chunks after compaction");
2065   #endif // ASSERT
2066   _collectorState = Resetting;
2067   assert(_restart_addr == NULL,
2068          "Should have been NULL'd before baton was passed");
2069   reset(false /* == !asynch */);
2070   _cmsGen->reset_after_compaction();
2071   _concurrent_cycles_since_last_unload = 0;
2072 
2073   if (verifying() && !should_unload_classes()) {
2074     perm_gen_verify_bit_map()->clear_all();
2075   }
2076 
2077   // Clear any data recorded in the PLAB chunk arrays.
2078   if (_survivor_plab_array != NULL) {
2079     reset_survivor_plab_arrays();
2080   }
2081 
2082   // Adjust the per-size allocation stats for the next epoch.
2083   _cmsGen->cmsSpace()->endSweepFLCensus(sweep_count() /* fake */);
2084   // Restart the "inter sweep timer" for the next epoch.
2085   _inter_sweep_timer.reset();
2086   _inter_sweep_timer.start();
2087 
2088   // Sample collection pause time and reset for collection interval.
2089   if (UseAdaptiveSizePolicy) {
2090     size_policy()->msc_collection_end(gch->gc_cause());
2091   }
2092 
2093   gc_timer->register_gc_end(os::elapsed_counter());
2094 
2095   gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
2096 
2097   // For a mark-sweep-compact, compute_new_size() will be called
2098   // in the heap's do_collection() method.
2099 }
2100 
2101 // A work method used by the foreground collector to do
2102 // a mark-sweep, after taking over from a possibly on-going
2103 // concurrent mark-sweep collection.
2104 void CMSCollector::do_mark_sweep_work(bool clear_all_soft_refs,
2105   CollectorState first_state, bool should_start_over) {
2106   if (PrintGC && Verbose) {
2107     gclog_or_tty->print_cr("Pass concurrent collection to foreground "
2108       "collector with count %d",
2109       _full_gcs_since_conc_gc);
2110   }
2111   switch (_collectorState) {
2112     case Idling:
2113       if (first_state == Idling || should_start_over) {
        // The background GC was not active, or should be
        // restarted from scratch; start the cycle.
2116         _collectorState = InitialMarking;
2117       }
2118       // If first_state was not Idling, then a background GC
2119       // was in progress and has now finished.  No need to do it
2120       // again.  Leave the state as Idling.
2121       break;
2122     case Precleaning:
2123       // In the foreground case don't do the precleaning since
2124       // it is not done concurrently and there is extra work
2125       // required.
2126       _collectorState = FinalMarking;
2127   }
2128   collect_in_foreground(clear_all_soft_refs, GenCollectedHeap::heap()->gc_cause());
2129 
2130   // For a mark-sweep, compute_new_size() will be called
2131   // in the heap's do_collection() method.
2132 }
2133 
2134 
2135 void CMSCollector::getFreelistLocks() const {
2136   // Get locks for all free lists in all generations that this
2137   // collector is responsible for
2138   _cmsGen->freelistLock()->lock_without_safepoint_check();
2139   _permGen->freelistLock()->lock_without_safepoint_check();
2140 }
2141 
2142 void CMSCollector::releaseFreelistLocks() const {
2143   // Release locks for all free lists in all generations that this
2144   // collector is responsible for
2145   _cmsGen->freelistLock()->unlock();
2146   _permGen->freelistLock()->unlock();
2147 }
2148 
2149 bool CMSCollector::haveFreelistLocks() const {
2150   // Check locks for all free lists in all generations that this
2151   // collector is responsible for
2152   assert_lock_strong(_cmsGen->freelistLock());
2153   assert_lock_strong(_permGen->freelistLock());
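  // Note: this checking method is meant to be called only from within
  // asserts; in a product build (where asserts compile away) it must
  // never be reached, which the PRODUCT_ONLY line below enforces.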
2154   PRODUCT_ONLY(ShouldNotReachHere());
2155   return true;
2156 }
2157 
2158 // A utility class that is used by the CMS collector to
2159 // temporarily "release" the foreground collector from its
2160 // usual obligation to wait for the background collector to
2161 // complete an ongoing phase before proceeding.
2162 class ReleaseForegroundGC: public StackObj {
2163  private:
2164   CMSCollector* _c;
2165  public:
2166   ReleaseForegroundGC(CMSCollector* c) : _c(c) {
2167     assert(_c->_foregroundGCShouldWait, "Else should not need to call");
2168     MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2169     // allow a potentially blocked foreground collector to proceed
2170     _c->_foregroundGCShouldWait = false;
2171     if (_c->_foregroundGCIsActive) {
2172       CGC_lock->notify();
2173     }
2174     assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
2175            "Possible deadlock");
2176   }
2177 
2178   ~ReleaseForegroundGC() {
2179     assert(!_c->_foregroundGCShouldWait, "Usage protocol violation?");
2180     MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2181     _c->_foregroundGCShouldWait = true;
2182   }
2183 };
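
// For illustration only -- typical scoped use of ReleaseForegroundGC, as
// in collect_in_background() below; the destructor re-establishes the
// "foreground should wait" invariant when the scope exits:
//
//   {
//     ReleaseForegroundGC x(this);  // foreground may run during this scope
//     VM_CMS_Initial_Mark initial_mark_op(this);
//     VMThread::execute(&initial_mark_op);
//   }  // ~ReleaseForegroundGC: _foregroundGCShouldWait is true again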
2184 
// There are separate collect_in_background and collect_in_foreground methods
// because of the different locking requirements of the background collector
// and the foreground collector.  There was originally an attempt to share
// one "collect" method between the two, but the if-then-else logic this
// required made it cleaner to keep them separate.
2191 void CMSCollector::collect_in_background(bool clear_all_soft_refs, GCCause::Cause cause) {
2192   assert(Thread::current()->is_ConcurrentGC_thread(),
2193     "A CMS asynchronous collection is only allowed on a CMS thread.");
2194 
2195   GenCollectedHeap* gch = GenCollectedHeap::heap();
2196   {
2197     bool safepoint_check = Mutex::_no_safepoint_check_flag;
2198     MutexLockerEx hl(Heap_lock, safepoint_check);
2199     FreelistLocker fll(this);
2200     MutexLockerEx x(CGC_lock, safepoint_check);
2201     if (_foregroundGCIsActive || !UseAsyncConcMarkSweepGC) {
2202       // The foreground collector is active or we're
2203       // not using asynchronous collections.  Skip this
2204       // background collection.
2205       assert(!_foregroundGCShouldWait, "Should be clear");
2206       return;
2207     } else {
2208       assert(_collectorState == Idling, "Should be idling before start.");
2209       _collectorState = InitialMarking;
2210       register_gc_start(cause);
2211       // Reset the expansion cause, now that we are about to begin
2212       // a new cycle.
2213       clear_expansion_cause();
2214     }
2215     // Decide if we want to enable class unloading as part of the
2216     // ensuing concurrent GC cycle.
2217     update_should_unload_classes();
2218     _full_gc_requested = false;           // acks all outstanding full gc requests
2219     _full_gc_cause = GCCause::_no_gc;
2220     // Signal that we are about to start a collection
2221     gch->increment_total_full_collections();  // ... starting a collection cycle
2222     _collection_count_start = gch->total_full_collections();
2223   }
2224 
2225   // Used for PrintGC
2226   size_t prev_used;
2227   if (PrintGC && Verbose) {
2228     prev_used = _cmsGen->used(); // XXXPERM
2229   }
2230 
2231   // The change of the collection state is normally done at this level;
2232   // the exceptions are phases that are executed while the world is
2233   // stopped.  For those phases the change of state is done while the
2234   // world is stopped.  For baton passing purposes this allows the
2235   // background collector to finish the phase and change state atomically.
2236   // The foreground collector cannot wait on a phase that is done
2237   // while the world is stopped because the foreground collector already
2238   // has the world stopped and would deadlock.
2239   while (_collectorState != Idling) {
2240     if (TraceCMSState) {
2241       gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " in CMS state %d",
2242         Thread::current(), _collectorState);
2243     }
2244     // The foreground collector
2245     //   holds the Heap_lock throughout its collection.
2246     //   holds the CMS token (but not the lock)
2247     //     except while it is waiting for the background collector to yield.
2248     //
2249     // The foreground collector should be blocked (not for long)
2250     //   if the background collector is about to start a phase
2251     //   executed with world stopped.  If the background
2252     //   collector has already started such a phase, the
2253     //   foreground collector is blocked waiting for the
2254     //   Heap_lock.  The stop-world phases (InitialMarking and FinalMarking)
2255     //   are executed in the VM thread.
2256     //
2257     // The locking order is
2258     //   PendingListLock (PLL)  -- if applicable (FinalMarking)
2259     //   Heap_lock  (both this & PLL locked in VM_CMS_Operation::prologue())
2260     //   CMS token  (claimed in
2261     //                stop_world_and_do() -->
2262     //                  safepoint_synchronize() -->
2263     //                    CMSThread::synchronize())
2264 
2265     {
2266       // Check if the FG collector wants us to yield.
2267       CMSTokenSync x(true); // is cms thread
2268       if (waitForForegroundGC()) {
2269         // We yielded to a foreground GC, nothing more to be
2270         // done this round.
2271         assert(_foregroundGCShouldWait == false, "We set it to false in "
2272                "waitForForegroundGC()");
2273         if (TraceCMSState) {
2274           gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
2275             " exiting collection CMS state %d",
2276             Thread::current(), _collectorState);
2277         }
2278         return;
2279       } else {
2280         // The background collector can run but check to see if the
2281         // foreground collector has done a collection while the
2282         // background collector was waiting to get the CGC_lock
2283         // above.  If yes, break so that _foregroundGCShouldWait
2284         // is cleared before returning.
2285         if (_collectorState == Idling) {
2286           break;
2287         }
2288       }
2289     }
2290 
2291     assert(_foregroundGCShouldWait, "Foreground collector, if active, "
2292       "should be waiting");
2293 
2294     switch (_collectorState) {
2295       case InitialMarking:
2296         {
2297           ReleaseForegroundGC x(this);
2298           stats().record_cms_begin();
2299           VM_CMS_Initial_Mark initial_mark_op(this);
2300           VMThread::execute(&initial_mark_op);
2301         }
2302         // The collector state may be any legal state at this point
2303         // since the background collector may have yielded to the
2304         // foreground collector.
2305         break;
2306       case Marking:
2307         // initial marking in checkpointRootsInitialWork has been completed
2308         if (markFromRoots(true)) { // we were successful
2309           assert(_collectorState == Precleaning, "Collector state should "
2310             "have changed");
2311         } else {
2312           assert(_foregroundGCIsActive, "Internal state inconsistency");
2313         }
2314         break;
2315       case Precleaning:
2316         if (UseAdaptiveSizePolicy) {
2317           size_policy()->concurrent_precleaning_begin();
2318         }
2319         // marking from roots in markFromRoots has been completed
2320         preclean();
2321         if (UseAdaptiveSizePolicy) {
2322           size_policy()->concurrent_precleaning_end();
2323         }
2324         assert(_collectorState == AbortablePreclean ||
2325                _collectorState == FinalMarking,
2326                "Collector state should have changed");
2327         break;
2328       case AbortablePreclean:
        if (UseAdaptiveSizePolicy) {
          size_policy()->concurrent_phases_resume();
        }
2332         abortable_preclean();
2333         if (UseAdaptiveSizePolicy) {
2334           size_policy()->concurrent_precleaning_end();
2335         }
2336         assert(_collectorState == FinalMarking, "Collector state should "
2337           "have changed");
2338         break;
2339       case FinalMarking:
2340         {
2341           ReleaseForegroundGC x(this);
2342 
2343           VM_CMS_Final_Remark final_remark_op(this);
2344           VMThread::execute(&final_remark_op);
2345         }
2346         assert(_foregroundGCShouldWait, "block post-condition");
2347         break;
2348       case Sweeping:
2349         if (UseAdaptiveSizePolicy) {
2350           size_policy()->concurrent_sweeping_begin();
2351         }
2352         // final marking in checkpointRootsFinal has been completed
2353         sweep(true);
2354         assert(_collectorState == Resizing, "Collector state change "
2355           "to Resizing must be done under the free_list_lock");
2356         _full_gcs_since_conc_gc = 0;
2357 
2358         // Stop the timers for adaptive size policy for the concurrent phases
2359         if (UseAdaptiveSizePolicy) {
2360           size_policy()->concurrent_sweeping_end();
2361           size_policy()->concurrent_phases_end(gch->gc_cause(),
2362                                              gch->prev_gen(_cmsGen)->capacity(),
2363                                              _cmsGen->free());
2364         }
2365 
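        // Note: no break above -- after sweeping, control falls through
        // into the Resizing case below.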
2366       case Resizing: {
2367         // Sweeping has been completed...
2368         // At this point the background collection has completed.
2369         // Don't move the call to compute_new_size() down
2370         // into code that might be executed if the background
2371         // collection was preempted.
2372         {
2373           ReleaseForegroundGC x(this);   // unblock FG collection
2374           MutexLockerEx       y(Heap_lock, Mutex::_no_safepoint_check_flag);
2375           CMSTokenSync        z(true);   // not strictly needed.
2376           if (_collectorState == Resizing) {
2377             compute_new_size();
2378             save_heap_summary();
2379             _collectorState = Resetting;
2380           } else {
2381             assert(_collectorState == Idling, "The state should only change"
2382                    " because the foreground collector has finished the collection");
2383           }
2384         }
2385         break;
2386       }
2387       case Resetting:
2388         // CMS heap resizing has been completed
2389         reset(true);
2390         assert(_collectorState == Idling, "Collector state should "
2391           "have changed");
2392         stats().record_cms_end();
2393         // Don't move the concurrent_phases_end() and compute_new_size()
2394         // calls to here because a preempted background collection
        // has its state set to "Resetting".
2396         break;
2397       case Idling:
2398       default:
2399         ShouldNotReachHere();
2400         break;
2401     }
2402     if (TraceCMSState) {
2403       gclog_or_tty->print_cr("  Thread " INTPTR_FORMAT " done - next CMS state %d",
2404         Thread::current(), _collectorState);
2405     }
2406     assert(_foregroundGCShouldWait, "block post-condition");
2407   }
2408 
2409   // Should this be in gc_epilogue?
2410   collector_policy()->counters()->update_counters();
2411 
2412   {
2413     // Clear _foregroundGCShouldWait and, in the event that the
2414     // foreground collector is waiting, notify it, before
2415     // returning.
2416     MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2417     _foregroundGCShouldWait = false;
2418     if (_foregroundGCIsActive) {
2419       CGC_lock->notify();
2420     }
2421     assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
2422            "Possible deadlock");
2423   }
2424   if (TraceCMSState) {
2425     gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
2426       " exiting collection CMS state %d",
2427       Thread::current(), _collectorState);
2428   }
2429   if (PrintGC && Verbose) {
2430     _cmsGen->print_heap_change(prev_used);
2431   }
2432 }
2433 
2434 void CMSCollector::register_foreground_gc_start(GCCause::Cause cause) {
2435   if (!_cms_start_registered) {
2436     register_gc_start(cause);
2437   }
2438 }
2439 
2440 void CMSCollector::register_gc_start(GCCause::Cause cause) {
2441   _cms_start_registered = true;
2442   _gc_timer_cm->register_gc_start(os::elapsed_counter());
2443   _gc_tracer_cm->report_gc_start(cause, _gc_timer_cm->gc_start());
2444 }
2445 
2446 void CMSCollector::register_gc_end() {
2447   if (_cms_start_registered) {
2448     report_heap_summary(GCWhen::AfterGC);
2449 
2450     _gc_timer_cm->register_gc_end(os::elapsed_counter());
2451     _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
2452     _cms_start_registered = false;
2453   }
2454 }
2455 
2456 void CMSCollector::save_heap_summary() {
2457   GenCollectedHeap* gch = GenCollectedHeap::heap();
2458   _last_heap_summary = gch->create_heap_summary();
2459   _last_perm_gen_summary = gch->create_perm_gen_summary();
2460 }
2461 
2462 void CMSCollector::report_heap_summary(GCWhen::Type when) {
2463   _gc_tracer_cm->report_gc_heap_summary(when, _last_heap_summary, _last_perm_gen_summary);
2464 }
2465 
2466 void CMSCollector::collect_in_foreground(bool clear_all_soft_refs, GCCause::Cause cause) {
2467   assert(_foregroundGCIsActive && !_foregroundGCShouldWait,
2468          "Foreground collector should be waiting, not executing");
  assert(Thread::current()->is_VM_thread(), "A foreground collection "
    "may only be done by the VM Thread with the world stopped");
2471   assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
2472          "VM thread should have CMS token");
2473 
2474   NOT_PRODUCT(GCTraceTime t("CMS:MS (foreground) ", PrintGCDetails && Verbose,
2475     true, NULL);)
2476   if (UseAdaptiveSizePolicy) {
2477     size_policy()->ms_collection_begin();
2478   }
2479   COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact);
2480 
2481   HandleMark hm;  // Discard invalid handles created during verification
2482 
2483   if (VerifyBeforeGC &&
2484       GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2485     Universe::verify();
2486   }
2487 
2488   // Snapshot the soft reference policy to be used in this collection cycle.
2489   ref_processor()->setup_policy(clear_all_soft_refs);
2490 
2491   bool init_mark_was_synchronous = false; // until proven otherwise
2492   while (_collectorState != Idling) {
2493     if (TraceCMSState) {
2494       gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " in CMS state %d",
2495         Thread::current(), _collectorState);
2496     }
2497     switch (_collectorState) {
2498       case InitialMarking:
2499         register_foreground_gc_start(cause);
2500         init_mark_was_synchronous = true;  // fact to be exploited in re-mark
2501         checkpointRootsInitial(false);
2502         assert(_collectorState == Marking, "Collector state should have changed"
2503           " within checkpointRootsInitial()");
2504         break;
2505       case Marking:
2506         // initial marking in checkpointRootsInitialWork has been completed
2507         if (VerifyDuringGC &&
2508             GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2509           gclog_or_tty->print("Verify before initial mark: ");
2510           Universe::verify();
2511         }
2512         {
2513           bool res = markFromRoots(false);
2514           assert(res && _collectorState == FinalMarking, "Collector state should "
2515             "have changed");
2516           break;
2517         }
2518       case FinalMarking:
2519         if (VerifyDuringGC &&
2520             GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2521           gclog_or_tty->print("Verify before re-mark: ");
2522           Universe::verify();
2523         }
2524         checkpointRootsFinal(false, clear_all_soft_refs,
2525                              init_mark_was_synchronous);
2526         assert(_collectorState == Sweeping, "Collector state should not "
2527           "have changed within checkpointRootsFinal()");
2528         break;
2529       case Sweeping:
2530         // final marking in checkpointRootsFinal has been completed
2531         if (VerifyDuringGC &&
2532             GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2533           gclog_or_tty->print("Verify before sweep: ");
2534           Universe::verify();
2535         }
2536         sweep(false);
2537         assert(_collectorState == Resizing, "Incorrect state");
2538         break;
2539       case Resizing: {
2540         // Sweeping has been completed; the actual resize in this case
2541         // is done separately; nothing to be done in this state.
2542         _collectorState = Resetting;
2543         break;
2544       }
2545       case Resetting:
2546         // The heap has been resized.
2547         if (VerifyDuringGC &&
2548             GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2549           gclog_or_tty->print("Verify before reset: ");
2550           Universe::verify();
2551         }
2552         save_heap_summary();
2553         reset(false);
2554         assert(_collectorState == Idling, "Collector state should "
2555           "have changed");
2556         break;
2557       case Precleaning:
2558       case AbortablePreclean:
2559         // Elide the preclean phase
2560         _collectorState = FinalMarking;
2561         break;
2562       default:
2563         ShouldNotReachHere();
2564     }
2565     if (TraceCMSState) {
2566       gclog_or_tty->print_cr("  Thread " INTPTR_FORMAT " done - next CMS state %d",
2567         Thread::current(), _collectorState);
2568     }
2569   }
2570 
2571   if (UseAdaptiveSizePolicy) {
2572     GenCollectedHeap* gch = GenCollectedHeap::heap();
2573     size_policy()->ms_collection_end(gch->gc_cause());
2574   }
2575 
2576   if (VerifyAfterGC &&
2577       GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2578     Universe::verify();
2579   }
2580   if (TraceCMSState) {
2581     gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
2582       " exiting collection CMS state %d",
2583       Thread::current(), _collectorState);
2584   }
2585 }
2586 
2587 bool CMSCollector::waitForForegroundGC() {
2588   bool res = false;
2589   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
2590          "CMS thread should have CMS token");
  // Block the foreground collector until the
  // background collector decides whether to
  // yield.
2594   MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2595   _foregroundGCShouldWait = true;
2596   if (_foregroundGCIsActive) {
2597     // The background collector yields to the
2598     // foreground collector and returns a value
2599     // indicating that it has yielded.  The foreground
2600     // collector can proceed.
2601     res = true;
2602     _foregroundGCShouldWait = false;
2603     ConcurrentMarkSweepThread::clear_CMS_flag(
2604       ConcurrentMarkSweepThread::CMS_cms_has_token);
2605     ConcurrentMarkSweepThread::set_CMS_flag(
2606       ConcurrentMarkSweepThread::CMS_cms_wants_token);
2607     // Get a possibly blocked foreground thread going
2608     CGC_lock->notify();
2609     if (TraceCMSState) {
2610       gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT " waiting at CMS state %d",
2611         Thread::current(), _collectorState);
2612     }
2613     while (_foregroundGCIsActive) {
2614       CGC_lock->wait(Mutex::_no_safepoint_check_flag);
2615     }
2616     ConcurrentMarkSweepThread::set_CMS_flag(
2617       ConcurrentMarkSweepThread::CMS_cms_has_token);
2618     ConcurrentMarkSweepThread::clear_CMS_flag(
2619       ConcurrentMarkSweepThread::CMS_cms_wants_token);
2620   }
2621   if (TraceCMSState) {
2622     gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT " continuing at CMS state %d",
2623       Thread::current(), _collectorState);
2624   }
2625   return res;
2626 }
2627 
// Because of the need to lock the free lists and other structures in
// the collector, common to all the generations that the collector is
// collecting, we need the gc_prologues of individual CMS generations
// to delegate to their collector. It may have been simpler had the
// current infrastructure allowed one to call a prologue on a
// collector. In the absence of that we have the generation's
// prologue delegate to the collector, which delegates back
// some "local" work to a worker method in the individual generations
// that it's responsible for collecting, while itself doing any
// work common to all generations it's responsible for. A similar
// comment applies to the gc_epilogue()s.
// The role of the variable _between_prologue_and_epilogue is to
// enforce the invocation protocol.
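//
// Schematically, for a CMS generation the prologue call chain is:
//
//   GenCollectedHeap                                  (world stopped)
//     -> ConcurrentMarkSweepGeneration::gc_prologue(full)
//          -> CMSCollector::gc_prologue(full)         // claims common locks once
//               -> _cmsGen->gc_prologue_work(...)     // per-generation local work
//               -> _permGen->gc_prologue_work(...)
//
// and symmetrically for gc_epilogue()/gc_epilogue_work().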
2641 void CMSCollector::gc_prologue(bool full) {
2642   // Call gc_prologue_work() for each CMSGen and PermGen that
2643   // we are responsible for.
2644 
2645   // The following locking discipline assumes that we are only called
2646   // when the world is stopped.
2647   assert(SafepointSynchronize::is_at_safepoint(), "world is stopped assumption");
2648 
2649   // The CMSCollector prologue must call the gc_prologues for the
2650   // "generations" (including PermGen if any) that it's responsible
2651   // for.
2652 
2653   assert(   Thread::current()->is_VM_thread()
2654          || (   CMSScavengeBeforeRemark
2655              && Thread::current()->is_ConcurrentGC_thread()),
2656          "Incorrect thread type for prologue execution");
2657 
2658   if (_between_prologue_and_epilogue) {
2659     // We have already been invoked; this is a gc_prologue delegation
2660     // from yet another CMS generation that we are responsible for, just
2661     // ignore it since all relevant work has already been done.
2662     return;
2663   }
2664 
2665   // set a bit saying prologue has been called; cleared in epilogue
2666   _between_prologue_and_epilogue = true;
2667   // Claim locks for common data structures, then call gc_prologue_work()
2668   // for each CMSGen and PermGen that we are responsible for.
2669 
2670   getFreelistLocks();   // gets free list locks on constituent spaces
2671   bitMapLock()->lock_without_safepoint_check();
2672 
2673   // Should call gc_prologue_work() for all cms gens we are responsible for
2674   bool registerClosure =    _collectorState >= Marking
2675                          && _collectorState < Sweeping;
2676   ModUnionClosure* muc = CollectedHeap::use_parallel_gc_threads() ?
2677                                                &_modUnionClosurePar
2678                                                : &_modUnionClosure;
2679   _cmsGen->gc_prologue_work(full, registerClosure, muc);
2680   _permGen->gc_prologue_work(full, registerClosure, muc);
2681 
2682   if (!full) {
2683     stats().record_gc0_begin();
2684   }
2685 }
2686 
2687 void ConcurrentMarkSweepGeneration::gc_prologue(bool full) {
2688   // Delegate to CMScollector which knows how to coordinate between
2689   // this and any other CMS generations that it is responsible for
2690   // collecting.
2691   collector()->gc_prologue(full);
2692 }
2693 
2694 // This is a "private" interface for use by this generation's CMSCollector.
2695 // Not to be called directly by any other entity (for instance,
2696 // GenCollectedHeap, which calls the "public" gc_prologue method above).
2697 void ConcurrentMarkSweepGeneration::gc_prologue_work(bool full,
2698   bool registerClosure, ModUnionClosure* modUnionClosure) {
2699   assert(!incremental_collection_failed(), "Shouldn't be set yet");
2700   assert(cmsSpace()->preconsumptionDirtyCardClosure() == NULL,
2701     "Should be NULL");
2702   if (registerClosure) {
2703     cmsSpace()->setPreconsumptionDirtyCardClosure(modUnionClosure);
2704   }
2705   cmsSpace()->gc_prologue();
2706   // Clear stat counters
2707   NOT_PRODUCT(
2708     assert(_numObjectsPromoted == 0, "check");
2709     assert(_numWordsPromoted   == 0, "check");
2710     if (Verbose && PrintGC) {
2711       gclog_or_tty->print("Allocated "SIZE_FORMAT" objects, "
2712                           SIZE_FORMAT" bytes concurrently",
2713       _numObjectsAllocated, _numWordsAllocated*sizeof(HeapWord));
2714     }
2715     _numObjectsAllocated = 0;
2716     _numWordsAllocated   = 0;
2717   )
2718 }
2719 
2720 void CMSCollector::gc_epilogue(bool full) {
2721   // The following locking discipline assumes that we are only called
2722   // when the world is stopped.
2723   assert(SafepointSynchronize::is_at_safepoint(),
2724          "world is stopped assumption");
2725 
  // Currently the CMS epilogue (see CompactibleFreeListSpace) merely checks
  // if linear allocation blocks need to be appropriately marked to allow
  // the blocks to be parsable. We also check here whether we need to nudge the
  // CMS collector thread to start a new cycle (if it's not already active).
2730   assert(   Thread::current()->is_VM_thread()
2731          || (   CMSScavengeBeforeRemark
2732              && Thread::current()->is_ConcurrentGC_thread()),
2733          "Incorrect thread type for epilogue execution");
2734 
2735   if (!_between_prologue_and_epilogue) {
2736     // We have already been invoked; this is a gc_epilogue delegation
2737     // from yet another CMS generation that we are responsible for, just
2738     // ignore it since all relevant work has already been done.
2739     return;
2740   }
2741   assert(haveFreelistLocks(), "must have freelist locks");
2742   assert_lock_strong(bitMapLock());
2743 
2744   _cmsGen->gc_epilogue_work(full);
2745   _permGen->gc_epilogue_work(full);
2746 
2747   if (_collectorState == AbortablePreclean || _collectorState == Precleaning) {
2748     // in case sampling was not already enabled, enable it
2749     _start_sampling = true;
2750   }
2751   // reset _eden_chunk_array so sampling starts afresh
2752   _eden_chunk_index = 0;
2753 
2754   size_t cms_used   = _cmsGen->cmsSpace()->used();
2755   size_t perm_used  = _permGen->cmsSpace()->used();
2756 
2757   // update performance counters - this uses a special version of
2758   // update_counters() that allows the utilization to be passed as a
2759   // parameter, avoiding multiple calls to used().
2760   //
2761   _cmsGen->update_counters(cms_used);
2762   _permGen->update_counters(perm_used);
2763 
2764   if (CMSIncrementalMode) {
2765     icms_update_allocation_limits();
2766   }
2767 
2768   bitMapLock()->unlock();
2769   releaseFreelistLocks();
2770 
2771   if (!CleanChunkPoolAsync) {
2772     Chunk::clean_chunk_pool();
2773   }
2774 
2775   _between_prologue_and_epilogue = false;  // ready for next cycle
2776 }
2777 
2778 void ConcurrentMarkSweepGeneration::gc_epilogue(bool full) {
2779   collector()->gc_epilogue(full);
2780 
2781   // Also reset promotion tracking in par gc thread states.
2782   if (CollectedHeap::use_parallel_gc_threads()) {
2783     for (uint i = 0; i < ParallelGCThreads; i++) {
2784       _par_gc_thread_states[i]->promo.stopTrackingPromotions(i);
2785     }
2786   }
2787 }
2788 
2789 void ConcurrentMarkSweepGeneration::gc_epilogue_work(bool full) {
2790   assert(!incremental_collection_failed(), "Should have been cleared");
2791   cmsSpace()->setPreconsumptionDirtyCardClosure(NULL);
2792   cmsSpace()->gc_epilogue();
  // Print stat counters
2794   NOT_PRODUCT(
2795     assert(_numObjectsAllocated == 0, "check");
2796     assert(_numWordsAllocated == 0, "check");
2797     if (Verbose && PrintGC) {
2798       gclog_or_tty->print("Promoted "SIZE_FORMAT" objects, "
2799                           SIZE_FORMAT" bytes",
2800                  _numObjectsPromoted, _numWordsPromoted*sizeof(HeapWord));
2801     }
2802     _numObjectsPromoted = 0;
2803     _numWordsPromoted   = 0;
2804   )
2805 
  if (PrintGC && Verbose) {
    // The call down the chain in contiguous_available() needs the freelistLock,
    // so print this out before releasing the freeListLock.
2809     gclog_or_tty->print(" Contiguous available "SIZE_FORMAT" bytes ",
2810                         contiguous_available());
2811   }
2812 }
2813 
2814 #ifndef PRODUCT
2815 bool CMSCollector::have_cms_token() {
2816   Thread* thr = Thread::current();
2817   if (thr->is_VM_thread()) {
2818     return ConcurrentMarkSweepThread::vm_thread_has_cms_token();
2819   } else if (thr->is_ConcurrentGC_thread()) {
2820     return ConcurrentMarkSweepThread::cms_thread_has_cms_token();
2821   } else if (thr->is_GC_task_thread()) {
2822     return ConcurrentMarkSweepThread::vm_thread_has_cms_token() &&
2823            ParGCRareEvent_lock->owned_by_self();
2824   }
2825   return false;
2826 }
2827 #endif
2828 
2829 // Check reachability of the given heap address in CMS generation,
2830 // treating all other generations as roots.
2831 bool CMSCollector::is_cms_reachable(HeapWord* addr) {
2832   // We could "guarantee" below, rather than assert, but i'll
2833   // leave these as "asserts" so that an adventurous debugger
2834   // could try this in the product build provided some subset of
2835   // the conditions were met, provided they were intersted in the
2836   // results and knew that the computation below wouldn't interfere
2837   // with other concurrent computations mutating the structures
2838   // being read or written.
2839   assert(SafepointSynchronize::is_at_safepoint(),
2840          "Else mutations in object graph will make answer suspect");
2841   assert(have_cms_token(), "Should hold cms token");
2842   assert(haveFreelistLocks(), "must hold free list locks");
2843   assert_lock_strong(bitMapLock());
2844 
2845   // Clear the marking bit map array before starting, but, just
2846   // for kicks, first report if the given address is already marked
2847   gclog_or_tty->print_cr("Start: Address 0x%x is%s marked", addr,
2848                 _markBitMap.isMarked(addr) ? "" : " not");
2849 
2850   if (verify_after_remark()) {
2851     MutexLockerEx x(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
2852     bool result = verification_mark_bm()->isMarked(addr);
2853     gclog_or_tty->print_cr("TransitiveMark: Address 0x%x %s marked", addr,
2854                            result ? "IS" : "is NOT");
2855     return result;
2856   } else {
2857     gclog_or_tty->print_cr("Could not compute result");
2858     return false;
2859   }
2860 }
2861 
2862 ////////////////////////////////////////////////////////
2863 // CMS Verification Support
2864 ////////////////////////////////////////////////////////
2865 // Following the remark phase, the following invariant
2866 // should hold -- each object in the CMS heap which is
2867 // marked in markBitMap() should be marked in the verification_mark_bm().
2868 
2869 class VerifyMarkedClosure: public BitMapClosure {
2870   CMSBitMap* _marks;
2871   bool       _failed;
2872 
2873  public:
2874   VerifyMarkedClosure(CMSBitMap* bm): _marks(bm), _failed(false) {}
2875 
2876   bool do_bit(size_t offset) {
2877     HeapWord* addr = _marks->offsetToHeapWord(offset);
2878     if (!_marks->isMarked(addr)) {
2879       oop(addr)->print_on(gclog_or_tty);
2880       gclog_or_tty->print_cr(" ("INTPTR_FORMAT" should have been marked)", addr);
2881       _failed = true;
2882     }
2883     return true;
2884   }
2885 
2886   bool failed() { return _failed; }
2887 };
2888 
2889 bool CMSCollector::verify_after_remark() {
2890   gclog_or_tty->print(" [Verifying CMS Marking... ");
2891   MutexLockerEx ml(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
2892   static bool init = false;
2893 
2894   assert(SafepointSynchronize::is_at_safepoint(),
2895          "Else mutations in object graph will make answer suspect");
  assert(have_cms_token(),
         "Else there may be mutual interference in use of "
         "verification data structures");
2899   assert(_collectorState > Marking && _collectorState <= Sweeping,
2900          "Else marking info checked here may be obsolete");
2901   assert(haveFreelistLocks(), "must hold free list locks");
2902   assert_lock_strong(bitMapLock());
2903 
2904 
2905   // Allocate marking bit map if not already allocated
2906   if (!init) { // first time
2907     if (!verification_mark_bm()->allocate(_span)) {
2908       return false;
2909     }
2910     init = true;
2911   }
2912 
2913   assert(verification_mark_stack()->isEmpty(), "Should be empty");
2914 
2915   // Turn off refs discovery -- so we will be tracing through refs.
2916   // This is as intended, because by this time
2917   // GC must already have cleared any refs that need to be cleared,
2918   // and traced those that need to be marked; moreover,
  // the marking done here is not going to interfere in any
2920   // way with the marking information used by GC.
2921   NoRefDiscovery no_discovery(ref_processor());
2922 
2923   COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
2924 
2925   // Clear any marks from a previous round
2926   verification_mark_bm()->clear_all();
2927   assert(verification_mark_stack()->isEmpty(), "markStack should be empty");
2928   verify_work_stacks_empty();
2929 
2930   GenCollectedHeap* gch = GenCollectedHeap::heap();
2931   gch->ensure_parsability(false);  // fill TLABs, but no need to retire them
2932   // Update the saved marks which may affect the root scans.
2933   gch->save_marks();
2934 
2935   if (CMSRemarkVerifyVariant == 1) {
    // In this first variant of verification, we complete
    // all marking, then check if the new marks-vector is
    // a subset of the CMS marks-vector.
2939     verify_after_remark_work_1();
2940   } else if (CMSRemarkVerifyVariant == 2) {
2941     // In this second variant of verification, we flag an error
2942     // (i.e. an object reachable in the new marks-vector not reachable
2943     // in the CMS marks-vector) immediately, also indicating the
    // identity of an object (A) that references the unmarked object (B) --
2945     // presumably, a mutation to A failed to be picked up by preclean/remark?
2946     verify_after_remark_work_2();
2947   } else {
2948     warning("Unrecognized value %d for CMSRemarkVerifyVariant",
2949             CMSRemarkVerifyVariant);
2950   }
2951   gclog_or_tty->print(" done] ");
2952   return true;
2953 }
2954 
2955 void CMSCollector::verify_after_remark_work_1() {
2956   ResourceMark rm;
2957   HandleMark  hm;
2958   GenCollectedHeap* gch = GenCollectedHeap::heap();
2959 
2960   // Mark from roots one level into CMS
2961   MarkRefsIntoClosure notOlder(_span, verification_mark_bm());
2962   gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
2963 
2964   gch->gen_process_strong_roots(_cmsGen->level(),
2965                                 true,   // younger gens are roots
2966                                 true,   // activate StrongRootsScope
2967                                 true,   // collecting perm gen
2968                                 SharedHeap::ScanningOption(roots_scanning_options()),
2969                                 &notOlder,
2970                                 true,   // walk code active on stacks
2971                                 NULL);
2972 
2973   // Now mark from the roots
2974   assert(_revisitStack.isEmpty(), "Should be empty");
2975   MarkFromRootsClosure markFromRootsClosure(this, _span,
2976     verification_mark_bm(), verification_mark_stack(), &_revisitStack,
2977     false /* don't yield */, true /* verifying */);
2978   assert(_restart_addr == NULL, "Expected pre-condition");
2979   verification_mark_bm()->iterate(&markFromRootsClosure);
2980   while (_restart_addr != NULL) {
2981     // Deal with stack overflow: by restarting at the indicated
2982     // address.
2983     HeapWord* ra = _restart_addr;
2984     markFromRootsClosure.reset(ra);
2985     _restart_addr = NULL;
2986     verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
2987   }
2988   assert(verification_mark_stack()->isEmpty(), "Should have been drained");
2989   verify_work_stacks_empty();
2990   // Should reset the revisit stack above, since no class tree
2991   // surgery is forthcoming.
2992   _revisitStack.reset(); // throwing away all contents
2993 
2994   // Marking completed -- now verify that each bit marked in
2995   // verification_mark_bm() is also marked in markBitMap(); flag all
2996   // errors by printing corresponding objects.
2997   VerifyMarkedClosure vcl(markBitMap());
2998   verification_mark_bm()->iterate(&vcl);
2999   if (vcl.failed()) {
3000     gclog_or_tty->print("Verification failed");
3001     Universe::heap()->print_on(gclog_or_tty);
3002     fatal("CMS: failed marking verification after remark");
3003   }
3004 }
3005 
3006 void CMSCollector::verify_after_remark_work_2() {
3007   ResourceMark rm;
3008   HandleMark  hm;
3009   GenCollectedHeap* gch = GenCollectedHeap::heap();
3010 
3011   // Mark from roots one level into CMS
3012   MarkRefsIntoVerifyClosure notOlder(_span, verification_mark_bm(),
3013                                      markBitMap());
3014   gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
3015   gch->gen_process_strong_roots(_cmsGen->level(),
3016                                 true,   // younger gens are roots
3017                                 true,   // activate StrongRootsScope
3018                                 true,   // collecting perm gen
3019                                 SharedHeap::ScanningOption(roots_scanning_options()),
3020                                 &notOlder,
3021                                 true,   // walk code active on stacks
3022                                 NULL);
3023 
3024   // Now mark from the roots
3025   assert(_revisitStack.isEmpty(), "Should be empty");
3026   MarkFromRootsVerifyClosure markFromRootsClosure(this, _span,
3027     verification_mark_bm(), markBitMap(), verification_mark_stack());
3028   assert(_restart_addr == NULL, "Expected pre-condition");
3029   verification_mark_bm()->iterate(&markFromRootsClosure);
3030   while (_restart_addr != NULL) {
3031     // Deal with stack overflow: by restarting at the indicated
3032     // address.
3033     HeapWord* ra = _restart_addr;
3034     markFromRootsClosure.reset(ra);
3035     _restart_addr = NULL;
3036     verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
3037   }
3038   assert(verification_mark_stack()->isEmpty(), "Should have been drained");
3039   verify_work_stacks_empty();
3040   // Should reset the revisit stack above, since no class tree
3041   // surgery is forthcoming.
3042   _revisitStack.reset(); // throwing away all contents
3043 
3044   // Marking completed -- now verify that each bit marked in
3045   // verification_mark_bm() is also marked in markBitMap(); flag all
3046   // errors by printing corresponding objects.
3047   VerifyMarkedClosure vcl(markBitMap());
3048   verification_mark_bm()->iterate(&vcl);
3049   assert(!vcl.failed(), "Else verification above should not have succeeded");
3050 }
3051 
3052 void ConcurrentMarkSweepGeneration::save_marks() {
3053   // delegate to CMS space
3054   cmsSpace()->save_marks();
3055   for (uint i = 0; i < ParallelGCThreads; i++) {
3056     _par_gc_thread_states[i]->promo.startTrackingPromotions();
3057   }
3058 }
3059 
3060 bool ConcurrentMarkSweepGeneration::no_allocs_since_save_marks() {
3061   return cmsSpace()->no_allocs_since_save_marks();
3062 }
3063 
3064 #define CMS_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)    \
3065                                                                 \
3066 void ConcurrentMarkSweepGeneration::                            \
3067 oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) {   \
3068   cl->set_generation(this);                                     \
3069   cmsSpace()->oop_since_save_marks_iterate##nv_suffix(cl);      \
3070   cl->reset_generation();                                       \
3071   save_marks();                                                 \
3072 }
3073 
3074 ALL_SINCE_SAVE_MARKS_CLOSURES(CMS_SINCE_SAVE_MARKS_DEFN)
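
// The macro above is instantiated once per closure type by
// ALL_SINCE_SAVE_MARKS_CLOSURES; for example, for a (hypothetical,
// illustrative) closure type FooClosure with suffix "_nv" it would
// expand to:
//
//   void ConcurrentMarkSweepGeneration::
//   oop_since_save_marks_iterate_nv(FooClosure* cl) {
//     cl->set_generation(this);
//     cmsSpace()->oop_since_save_marks_iterate_nv(cl);
//     cl->reset_generation();
//     save_marks();
//   }
//
// The actual closure list comes from ALL_SINCE_SAVE_MARKS_CLOSURES.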
3075 
3076 void
3077 ConcurrentMarkSweepGeneration::object_iterate_since_last_GC(ObjectClosure* blk)
3078 {
3079   // Not currently implemented; need to do the following. -- ysr.
3080   // dld -- I think that is used for some sort of allocation profiler.  So it
3081   // really means the objects allocated by the mutator since the last
3082   // GC.  We could potentially implement this cheaply by recording only
3083   // the direct allocations in a side data structure.
3084   //
3085   // I think we probably ought not to be required to support these
3086   // iterations at any arbitrary point; I think there ought to be some
3087   // call to enable/disable allocation profiling in a generation/space,
3088   // and the iterator ought to return the objects allocated in the
3089   // gen/space since the enable call, or the last iterator call (which
3090   // will probably be at a GC.)  That way, for gens like CM&S that would
3091   // require some extra data structure to support this, we only pay the
3092   // cost when it's in use...
3093   cmsSpace()->object_iterate_since_last_GC(blk);
3094 }
3095 
3096 void
3097 ConcurrentMarkSweepGeneration::younger_refs_iterate(OopsInGenClosure* cl) {
3098   cl->set_generation(this);
3099   younger_refs_in_space_iterate(_cmsSpace, cl);
3100   cl->reset_generation();
3101 }
3102 
3103 void
3104 ConcurrentMarkSweepGeneration::oop_iterate(MemRegion mr, OopClosure* cl) {
3105   if (freelistLock()->owned_by_self()) {
3106     Generation::oop_iterate(mr, cl);
3107   } else {
3108     MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
3109     Generation::oop_iterate(mr, cl);
3110   }
3111 }
3112 
3113 void
3114 ConcurrentMarkSweepGeneration::oop_iterate(OopClosure* cl) {
3115   if (freelistLock()->owned_by_self()) {
3116     Generation::oop_iterate(cl);
3117   } else {
3118     MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
3119     Generation::oop_iterate(cl);
3120   }
3121 }
3122 
3123 void
3124 ConcurrentMarkSweepGeneration::object_iterate(ObjectClosure* cl) {
3125   if (freelistLock()->owned_by_self()) {
3126     Generation::object_iterate(cl);
3127   } else {
3128     MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
3129     Generation::object_iterate(cl);
3130   }
3131 }
3132 
3133 void
3134 ConcurrentMarkSweepGeneration::safe_object_iterate(ObjectClosure* cl) {
3135   if (freelistLock()->owned_by_self()) {
3136     Generation::safe_object_iterate(cl);
3137   } else {
3138     MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
3139     Generation::safe_object_iterate(cl);
3140   }
3141 }
3142 
3143 void
3144 ConcurrentMarkSweepGeneration::pre_adjust_pointers() {
3145 }
3146 
3147 void
3148 ConcurrentMarkSweepGeneration::post_compact() {
3149 }
3150 
3151 void
3152 ConcurrentMarkSweepGeneration::prepare_for_verify() {
3153   // Fix the linear allocation blocks to look like free blocks.
3154 
3155   // Locks are normally acquired/released in gc_prologue/gc_epilogue, but those
3156   // are not called when the heap is verified during universe initialization and
3157   // at vm shutdown.
3158   if (freelistLock()->owned_by_self()) {
3159     cmsSpace()->prepare_for_verify();
3160   } else {
3161     MutexLockerEx fll(freelistLock(), Mutex::_no_safepoint_check_flag);
3162     cmsSpace()->prepare_for_verify();
3163   }
3164 }
3165 
3166 void
3167 ConcurrentMarkSweepGeneration::verify() {
3168   // Locks are normally acquired/released in gc_prologue/gc_epilogue, but those
3169   // are not called when the heap is verified during universe initialization and
3170   // at vm shutdown.
3171   if (freelistLock()->owned_by_self()) {
3172     cmsSpace()->verify();
3173   } else {
3174     MutexLockerEx fll(freelistLock(), Mutex::_no_safepoint_check_flag);
3175     cmsSpace()->verify();
3176   }
3177 }
3178 
3179 void CMSCollector::verify() {
3180   _cmsGen->verify();
3181   _permGen->verify();
3182 }
3183 
3184 #ifndef PRODUCT
3185 bool CMSCollector::overflow_list_is_empty() const {
3186   assert(_num_par_pushes >= 0, "Inconsistency");
3187   if (_overflow_list == NULL) {
3188     assert(_num_par_pushes == 0, "Inconsistency");
3189   }
3190   return _overflow_list == NULL;
3191 }
3192 
3193 // The methods verify_work_stacks_empty() and verify_overflow_empty()
3194 // merely consolidate assertion checks that appear to occur together frequently.
3195 void CMSCollector::verify_work_stacks_empty() const {
3196   assert(_markStack.isEmpty(), "Marking stack should be empty");
3197   assert(overflow_list_is_empty(), "Overflow list should be empty");
3198 }
3199 
3200 void CMSCollector::verify_overflow_empty() const {
3201   assert(overflow_list_is_empty(), "Overflow list should be empty");
3202   assert(no_preserved_marks(), "No preserved marks");
3203 }
3204 #endif // PRODUCT
3205 
3206 // Decide if we want to enable class unloading as part of the
3207 // ensuing concurrent GC cycle. We will collect the perm gen and
3208 // unload classes if it's the case that:
3209 // (1) an explicit gc request has been made and the flag
3210 //     ExplicitGCInvokesConcurrentAndUnloadsClasses is set, OR
3211 // (2) (a) class unloading is enabled at the command line, and
3212 //     (b) (i)   perm gen threshold has been crossed, or
3213 //         (ii)  old gen is getting really full, or
3214 //         (iii) the previous N CMS collections did not collect the
3215 //               perm gen
3216 // NOTE: Provided there is no change in the state of the heap between
3217 // calls to this method, it should have idempotent results. Moreover,
3218 // its results should be monotonically increasing (i.e. going from 0 to 1,
3219 // but not 1 to 0) between successive calls between which the heap was
3220 // not collected. For the implementation below, it must thus rely on
3221 // the property that concurrent_cycles_since_last_unload()
3222 // will not decrease unless a collection cycle happened and that
3223 // _permGen->should_concurrent_collect() and _cmsGen->is_too_full() are
3224 // themselves also monotonic in that sense. See check_monotonicity()
3225 // below.
3226 bool CMSCollector::update_should_unload_classes() {
3227   _should_unload_classes = false;
3228   // Condition 1 above
3229   if (_full_gc_requested && ExplicitGCInvokesConcurrentAndUnloadsClasses) {
3230     _should_unload_classes = true;
3231   } else if (CMSClassUnloadingEnabled) { // Condition 2.a above
3232     // Disjuncts 2.b.(i,ii,iii) above
3233     _should_unload_classes = (concurrent_cycles_since_last_unload() >=
3234                               CMSClassUnloadingMaxInterval)
3235                            || _permGen->should_concurrent_collect()
3236                            || _cmsGen->is_too_full();
3237   }
3238   return _should_unload_classes;
3239 }
3240 
3241 bool ConcurrentMarkSweepGeneration::is_too_full() const {
3242   bool res = should_concurrent_collect();
3243   res = res && (occupancy() > (double)CMSIsTooFullPercentage/100.0);
3244   return res;
3245 }
3246 
3247 void CMSCollector::setup_cms_unloading_and_verification_state() {
3248   const  bool should_verify =   VerifyBeforeGC || VerifyAfterGC || VerifyDuringGC
3249                              || VerifyBeforeExit;
3250   const  int  rso           =   SharedHeap::SO_Strings | SharedHeap::SO_CodeCache;
3251 
3252   if (should_unload_classes()) {   // Should unload classes this cycle
3253     remove_root_scanning_option(rso);  // Shrink the root set appropriately
3254     set_verifying(should_verify);    // Set verification state for this cycle
3255     return;                            // Nothing else needs to be done at this time
3256   }
3257 
3258   // Not unloading classes this cycle
3259   assert(!should_unload_classes(), "Inconsitency!");
3260   if ((!verifying() || unloaded_classes_last_cycle()) && should_verify) {
3261     // We were not verifying, or we _were_ unloading classes in the last cycle,
3262     // AND some verification options are enabled this cycle; in this case,
3263     // we must make sure that the deadness map is allocated if not already so,
3264     // and cleared (if already allocated previously --
3265     // CMSBitMap::sizeInBits() is used to determine if it's allocated).
3266     if (perm_gen_verify_bit_map()->sizeInBits() == 0) {
3267       if (!perm_gen_verify_bit_map()->allocate(_permGen->reserved())) {
3268         warning("Failed to allocate permanent generation verification CMS Bit Map;\n"
3269                 "permanent generation verification disabled");
3270         return;  // Note that we leave verification disabled, so we'll retry this
3271                  // allocation next cycle. We _could_ remember this failure
3272                  // and skip further attempts and permanently disable verification
3273                  // attempts if that is considered more desirable.
3274       }
3275       assert(perm_gen_verify_bit_map()->covers(_permGen->reserved()),
3276               "_perm_gen_ver_bit_map inconsistency?");
3277     } else {
3278       perm_gen_verify_bit_map()->clear_all();
3279     }
3280     // Include symbols, strings and code cache elements to prevent their resurrection.
3281     add_root_scanning_option(rso);
3282     set_verifying(true);
3283   } else if (verifying() && !should_verify) {
3284     // We were verifying, but some verification flags got disabled.
3285     set_verifying(false);
3286     // Exclude symbols, strings and code cache elements from root scanning to
3287     // reduce IM and RM pauses.
3288     remove_root_scanning_option(rso);
3289   }
3290 }
3291 
3292 
3293 #ifndef PRODUCT
3294 HeapWord* CMSCollector::block_start(const void* p) const {
3295   const HeapWord* addr = (HeapWord*)p;
3296   if (_span.contains(p)) {
3297     if (_cmsGen->cmsSpace()->is_in_reserved(addr)) {
3298       return _cmsGen->cmsSpace()->block_start(p);
3299     } else {
3300       assert(_permGen->cmsSpace()->is_in_reserved(addr),
3301              "Inconsistent _span?");
3302       return _permGen->cmsSpace()->block_start(p);
3303     }
3304   }
3305   return NULL;
3306 }
3307 #endif
3308 
3309 HeapWord*
3310 ConcurrentMarkSweepGeneration::expand_and_allocate(size_t word_size,
3311                                                    bool   tlab,
3312                                                    bool   parallel) {
3313   CMSSynchronousYieldRequest yr;
3314   assert(!tlab, "Can't deal with TLAB allocation");
3315   MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
3316   expand(word_size*HeapWordSize, MinHeapDeltaBytes,
3317     CMSExpansionCause::_satisfy_allocation);
3318   if (GCExpandToAllocateDelayMillis > 0) {
3319     os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
3320   }
3321   return have_lock_and_allocate(word_size, tlab);
3322 }
3323 
3324 // YSR: All of this generation expansion/shrinking stuff is an exact copy of
3325 // OneContigSpaceCardGeneration, which makes me wonder if we should move this
3326 // to CardGeneration and share it...
3327 bool ConcurrentMarkSweepGeneration::expand(size_t bytes, size_t expand_bytes) {
3328   return CardGeneration::expand(bytes, expand_bytes);
3329 }
3330 
3331 void ConcurrentMarkSweepGeneration::expand(size_t bytes, size_t expand_bytes,
3332   CMSExpansionCause::Cause cause)
3333 {
3334 
3335   bool success = expand(bytes, expand_bytes);
3336 
3337   // remember why we expanded; this information is used
3338   // by shouldConcurrentCollect() when making decisions on whether to start
3339   // a new CMS cycle.
3340   if (success) {
3341     set_expansion_cause(cause);
3342     if (PrintGCDetails && Verbose) {
3343       gclog_or_tty->print_cr("Expanded CMS gen for %s",
3344         CMSExpansionCause::to_string(cause));
3345     }
3346   }
3347 }
3348 
3349 HeapWord* ConcurrentMarkSweepGeneration::expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz) {
3350   HeapWord* res = NULL;
3351   MutexLocker x(ParGCRareEvent_lock);
3352   while (true) {
3353     // Expansion by some other thread might make alloc OK now:
3354     res = ps->lab.alloc(word_sz);
3355     if (res != NULL) return res;
3356     // If there's not enough expansion space available, give up.
3357     if (_virtual_space.uncommitted_size() < (word_sz * HeapWordSize)) {
3358       return NULL;
3359     }
3360     // Otherwise, we try expansion.
3361     expand(word_sz*HeapWordSize, MinHeapDeltaBytes,
3362       CMSExpansionCause::_allocate_par_lab);
3363     // Now go around the loop and try alloc again;
3364     // A competing par_promote might beat us to the expansion space,
    // so we may go around the loop again if promotion fails again.
3366     if (GCExpandToAllocateDelayMillis > 0) {
3367       os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
3368     }
3369   }
3370 }
3371 
3372 
3373 bool ConcurrentMarkSweepGeneration::expand_and_ensure_spooling_space(
3374   PromotionInfo* promo) {
3375   MutexLocker x(ParGCRareEvent_lock);
3376   size_t refill_size_bytes = promo->refillSize() * HeapWordSize;
3377   while (true) {
3378     // Expansion by some other thread might make alloc OK now:
3379     if (promo->ensure_spooling_space()) {
3380       assert(promo->has_spooling_space(),
3381              "Post-condition of successful ensure_spooling_space()");
3382       return true;
3383     }
3384     // If there's not enough expansion space available, give up.
3385     if (_virtual_space.uncommitted_size() < refill_size_bytes) {
3386       return false;
3387     }
3388     // Otherwise, we try expansion.
3389     expand(refill_size_bytes, MinHeapDeltaBytes,
3390       CMSExpansionCause::_allocate_par_spooling_space);
3391     // Now go around the loop and try alloc again;
3392     // A competing allocation might beat us to the expansion space,
3393     // so we may go around the loop again if allocation fails again.
3394     if (GCExpandToAllocateDelayMillis > 0) {
3395       os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
3396     }
3397   }
3398 }
3399 
3400 
3401 
3402 void ConcurrentMarkSweepGeneration::shrink(size_t bytes) {
3403   assert_locked_or_safepoint(Heap_lock);
3404   size_t size = ReservedSpace::page_align_size_down(bytes);
3405   if (size > 0) {
3406     shrink_by(size);
3407   }
3408 }
3409 
3410 bool ConcurrentMarkSweepGeneration::grow_by(size_t bytes) {
3411   assert_locked_or_safepoint(Heap_lock);
3412   bool result = _virtual_space.expand_by(bytes);
3413   if (result) {
3414     HeapWord* old_end = _cmsSpace->end();
3415     size_t new_word_size =
3416       heap_word_size(_virtual_space.committed_size());
3417     MemRegion mr(_cmsSpace->bottom(), new_word_size);
3418     _bts->resize(new_word_size);  // resize the block offset shared array
3419     Universe::heap()->barrier_set()->resize_covered_region(mr);
3420     // Hmmmm... why doesn't CFLS::set_end verify locking?
3421     // This is quite ugly; FIX ME XXX
3422     _cmsSpace->assert_locked(freelistLock());
3423     _cmsSpace->set_end((HeapWord*)_virtual_space.high());
3424 
3425     // update the space and generation capacity counters
3426     if (UsePerfData) {
3427       _space_counters->update_capacity();
3428       _gen_counters->update_all();
3429     }
3430 
3431     if (Verbose && PrintGC) {
3432       size_t new_mem_size = _virtual_space.committed_size();
3433       size_t old_mem_size = new_mem_size - bytes;
3434       gclog_or_tty->print_cr("Expanding %s from %ldK by %ldK to %ldK",
3435                     name(), old_mem_size/K, bytes/K, new_mem_size/K);
3436     }
3437   }
3438   return result;
3439 }
3440 
3441 bool ConcurrentMarkSweepGeneration::grow_to_reserved() {
3442   assert_locked_or_safepoint(Heap_lock);
3443   bool success = true;
3444   const size_t remaining_bytes = _virtual_space.uncommitted_size();
3445   if (remaining_bytes > 0) {
3446     success = grow_by(remaining_bytes);
3447     DEBUG_ONLY(if (!success) warning("grow to reserved failed");)
3448   }
3449   return success;
3450 }
3451 
3452 void ConcurrentMarkSweepGeneration::shrink_by(size_t bytes) {
3453   assert_locked_or_safepoint(Heap_lock);
3454   assert_lock_strong(freelistLock());
3455   // XXX Fix when compaction is implemented.
3456   warning("Shrinking of CMS not yet implemented");
3457   return;
3458 }
3459 
3460 
3461 // Simple ctor/dtor wrapper for accounting & timer chores around concurrent
3462 // phases.
3463 class CMSPhaseAccounting: public StackObj {
3464  public:
3465   CMSPhaseAccounting(CMSCollector *collector,
3466                      const char *phase,
3467                      bool print_cr = true);
3468   ~CMSPhaseAccounting();
3469 
3470  private:
3471   CMSCollector *_collector;
3472   const char *_phase;
3473   elapsedTimer _wallclock;
3474   bool _print_cr;
3475 
3476  public:
3477   // Not MT-safe; so do not pass around these StackObj's
3478   // where they may be accessed by other threads.
3479   jlong wallclock_millis() {
3480     assert(_wallclock.is_active(), "Wall clock should not stop");
3481     _wallclock.stop();  // to record time
3482     jlong ret = _wallclock.milliseconds();
3483     _wallclock.start(); // restart
3484     return ret;
3485   }
3486 };
3487 
3488 CMSPhaseAccounting::CMSPhaseAccounting(CMSCollector *collector,
3489                                        const char *phase,
3490                                        bool print_cr) :
3491   _collector(collector), _phase(phase), _print_cr(print_cr) {
3492 
3493   if (PrintCMSStatistics != 0) {
3494     _collector->resetYields();
3495   }
3496   if (PrintGCDetails && PrintGCTimeStamps) {
3497     gclog_or_tty->date_stamp(PrintGCDateStamps);
3498     gclog_or_tty->stamp();
3499     gclog_or_tty->print_cr(": [%s-concurrent-%s-start]",
3500       _collector->cmsGen()->short_name(), _phase);
3501   }
3502   _collector->resetTimer();
3503   _wallclock.start();
3504   _collector->startTimer();
3505 }
3506 
3507 CMSPhaseAccounting::~CMSPhaseAccounting() {
3508   assert(_wallclock.is_active(), "Wall clock should not have stopped");
3509   _collector->stopTimer();
3510   _wallclock.stop();
3511   if (PrintGCDetails) {
3512     gclog_or_tty->date_stamp(PrintGCDateStamps);
3513     gclog_or_tty->stamp(PrintGCTimeStamps);
3514     gclog_or_tty->print("[%s-concurrent-%s: %3.3f/%3.3f secs]",
3515                  _collector->cmsGen()->short_name(),
3516                  _phase, _collector->timerValue(), _wallclock.seconds());
3517     if (_print_cr) {
3518       gclog_or_tty->print_cr("");
3519     }
3520     if (PrintCMSStatistics != 0) {
3521       gclog_or_tty->print_cr(" (CMS-concurrent-%s yielded %d times)", _phase,
3522                     _collector->yields());
3523     }
3524   }
3525 }
3526 
3527 // CMS work
3528 
3529 // Checkpoint the roots into this generation from outside
3530 // this generation. [Note this initial checkpoint need only
3531 // be approximate -- we'll do a catch up phase subsequently.]
3532 void CMSCollector::checkpointRootsInitial(bool asynch) {
3533   assert(_collectorState == InitialMarking, "Wrong collector state");
3534   check_correct_thread_executing();
3535   TraceCMSMemoryManagerStats tms(_collectorState,GenCollectedHeap::heap()->gc_cause());
3536 
3537   save_heap_summary();
3538   report_heap_summary(GCWhen::BeforeGC);
3539 
3540   ReferenceProcessor* rp = ref_processor();
3541   SpecializationStats::clear();
3542   assert(_restart_addr == NULL, "Control point invariant");
3543   if (asynch) {
3544     // acquire locks for subsequent manipulations
3545     MutexLockerEx x(bitMapLock(),
3546                     Mutex::_no_safepoint_check_flag);
3547     checkpointRootsInitialWork(asynch);
3548     // enable ("weak") refs discovery
3549     rp->enable_discovery(true /*verify_disabled*/, true /*check_no_refs*/);
3550     _collectorState = Marking;
3551   } else {
3552     // (Weak) Refs discovery: this is controlled from genCollectedHeap::do_collection
3553     // which recognizes if we are a CMS generation, and doesn't try to turn on
3554     // discovery; verify that they aren't meddling.
3555     assert(!rp->discovery_is_atomic(),
3556            "incorrect setting of discovery predicate");
3557     assert(!rp->discovery_enabled(), "genCollectedHeap shouldn't control "
3558            "ref discovery for this generation kind");
3559     // already have locks
3560     checkpointRootsInitialWork(asynch);
3561     // now enable ("weak") refs discovery
3562     rp->enable_discovery(true /*verify_disabled*/, false /*verify_no_refs*/);
3563     _collectorState = Marking;
3564   }
3565   SpecializationStats::print();
3566 }
3567 
3568 void CMSCollector::checkpointRootsInitialWork(bool asynch) {
3569   assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped");
3570   assert(_collectorState == InitialMarking, "just checking");
3571 
3572   // If there has not been a GC[n-1] since last GC[n] cycle completed,
3573   // precede our marking with a collection of all
3574   // younger generations to keep floating garbage to a minimum.
3575   // XXX: we won't do this for now -- it's an optimization to be done later.
3576 
3577   // already have locks
3578   assert_lock_strong(bitMapLock());
3579   assert(_markBitMap.isAllClear(), "was reset at end of previous cycle");
3580 
3581   // Setup the verification and class unloading state for this
3582   // CMS collection cycle.
3583   setup_cms_unloading_and_verification_state();
3584 
3585   NOT_PRODUCT(GCTraceTime t("\ncheckpointRootsInitialWork",
3586     PrintGCDetails && Verbose, true, _gc_timer_cm);)
3587   if (UseAdaptiveSizePolicy) {
3588     size_policy()->checkpoint_roots_initial_begin();
3589   }
3590 
3591   // Reset all the PLAB chunk arrays if necessary.
3592   if (_survivor_plab_array != NULL && !CMSPLABRecordAlways) {
3593     reset_survivor_plab_arrays();
3594   }
3595 
3596   ResourceMark rm;
3597   HandleMark  hm;
3598 
3599   FalseClosure falseClosure;
3600   // In the case of a synchronous collection, we will elide the
3601   // remark step, so it's important to catch all the nmethod oops
3602   // in this step.
3603   // The final 'true' flag to gen_process_strong_roots will ensure this.
3604   // If 'async' is true, we can relax the nmethod tracing.
3605   MarkRefsIntoClosure notOlder(_span, &_markBitMap);
3606   GenCollectedHeap* gch = GenCollectedHeap::heap();
3607 
3608   verify_work_stacks_empty();
3609   verify_overflow_empty();
3610 
3611   gch->ensure_parsability(false);  // fill TLABs, but no need to retire them
3612   // Update the saved marks which may affect the root scans.
3613   gch->save_marks();
3614 
3615   // weak reference processing has not started yet.
3616   ref_processor()->set_enqueuing_is_done(false);
3617 
3618   {
3619     // This is not needed. DEBUG_ONLY(RememberKlassesChecker imx(true);)
3620     COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
3621     gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
3622     gch->gen_process_strong_roots(_cmsGen->level(),
3623                                   true,   // younger gens are roots
3624                                   true,   // activate StrongRootsScope
3625                                   true,   // collecting perm gen
3626                                   SharedHeap::ScanningOption(roots_scanning_options()),
3627                                   &notOlder,
3628                                   true,   // walk all of code cache if (so & SO_CodeCache)
3629                                   NULL);
3630   }
3631 
3632   // Clear mod-union table; it will be dirtied in the prologue of
3633   // CMS generation per each younger generation collection.
3634 
3635   assert(_modUnionTable.isAllClear(),
3636        "Was cleared in most recent final checkpoint phase"
3637        " or no bits are set in the gc_prologue before the start of the next "
3638        "subsequent marking phase.");
3639 
3640   // Save the end of the used_region of the constituent generations
3641   // to be used to limit the extent of sweep in each generation.
3642   save_sweep_limits();
3643   if (UseAdaptiveSizePolicy) {
3644     size_policy()->checkpoint_roots_initial_end(gch->gc_cause());
3645   }
3646   verify_overflow_empty();
3647 }
3648 
3649 bool CMSCollector::markFromRoots(bool asynch) {
3650   // we might be tempted to assert that:
3651   // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
3652   //        "inconsistent argument?");
3653   // However that wouldn't be right, because it's possible that
3654   // a safepoint is indeed in progress as a younger generation
3655   // stop-the-world GC happens even as we mark in this generation.
3656   assert(_collectorState == Marking, "inconsistent state?");
3657   check_correct_thread_executing();
3658   verify_overflow_empty();
3659 
3660   bool res;
3661   if (asynch) {
3662 
    // Start the timers for adaptive size policy for the concurrent phases.
    // Do it here so that the foreground MS can use the concurrent
    // timer since a foreground MS might have the sweep done concurrently
    // or STW.
3667     if (UseAdaptiveSizePolicy) {
3668       size_policy()->concurrent_marking_begin();
3669     }
3670 
3671     // Weak ref discovery note: We may be discovering weak
3672     // refs in this generation concurrent (but interleaved) with
3673     // weak ref discovery by a younger generation collector.
3674 
3675     CMSTokenSyncWithLocks ts(true, bitMapLock());
3676     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
3677     CMSPhaseAccounting pa(this, "mark", !PrintGCDetails);
3678     res = markFromRootsWork(asynch);
3679     if (res) {
3680       _collectorState = Precleaning;
3681     } else { // We failed and a foreground collection wants to take over
3682       assert(_foregroundGCIsActive, "internal state inconsistency");
3683       assert(_restart_addr == NULL,  "foreground will restart from scratch");
3684       if (PrintGCDetails) {
3685         gclog_or_tty->print_cr("bailing out to foreground collection");
3686       }
3687     }
3688     if (UseAdaptiveSizePolicy) {
3689       size_policy()->concurrent_marking_end();
3690     }
3691   } else {
3692     assert(SafepointSynchronize::is_at_safepoint(),
3693            "inconsistent with asynch == false");
3694     if (UseAdaptiveSizePolicy) {
3695       size_policy()->ms_collection_marking_begin();
3696     }
3697     // already have locks
3698     res = markFromRootsWork(asynch);
3699     _collectorState = FinalMarking;
3700     if (UseAdaptiveSizePolicy) {
3701       GenCollectedHeap* gch = GenCollectedHeap::heap();
3702       size_policy()->ms_collection_marking_end(gch->gc_cause());
3703     }
3704   }
3705   verify_overflow_empty();
3706   return res;
3707 }
3708 
3709 bool CMSCollector::markFromRootsWork(bool asynch) {
3710   // iterate over marked bits in bit map, doing a full scan and mark
3711   // from these roots using the following algorithm:
3712   // . if oop is to the right of the current scan pointer,
3713   //   mark corresponding bit (we'll process it later)
3714   // . else (oop is to left of current scan pointer)
3715   //   push oop on marking stack
3716   // . drain the marking stack
3717 
3718   // Note that when we do a marking step we need to hold the
3719   // bit map lock -- recall that direct allocation (by mutators)
3720   // and promotion (by younger generation collectors) is also
3721   // marking the bit map. [the so-called allocate live policy.]
3722   // Because the implementation of bit map marking is not
3723   // robust wrt simultaneous marking of bits in the same word,
3724   // we need to make sure that there is no such interference
3725   // between concurrent such updates.
3726 
3727   // already have locks
3728   assert_lock_strong(bitMapLock());
3729 
3730   // Clear the revisit stack, just in case there are any
3731   // obsolete contents from a short-circuited previous CMS cycle.
3732   _revisitStack.reset();
3733   verify_work_stacks_empty();
3734   verify_overflow_empty();
3735   assert(_revisitStack.isEmpty(), "tabula rasa");
3736   DEBUG_ONLY(RememberKlassesChecker cmx(should_unload_classes());)
3737   bool result = false;
3738   if (CMSConcurrentMTEnabled && ConcGCThreads > 0) {
3739     result = do_marking_mt(asynch);
3740   } else {
3741     result = do_marking_st(asynch);
3742   }
3743   return result;
3744 }
3745 
3746 // Forward decl
3747 class CMSConcMarkingTask;
3748 
3749 class CMSConcMarkingTerminator: public ParallelTaskTerminator {
3750   CMSCollector*       _collector;
3751   CMSConcMarkingTask* _task;
3752  public:
3753   virtual void yield();
3754 
3755   // "n_threads" is the number of threads to be terminated.
3756   // "queue_set" is a set of work queues of other threads.
3757   // "collector" is the CMS collector associated with this task terminator.
3758   // "yield" indicates whether we need the gang as a whole to yield.
3759   CMSConcMarkingTerminator(int n_threads, TaskQueueSetSuper* queue_set, CMSCollector* collector) :
3760     ParallelTaskTerminator(n_threads, queue_set),
3761     _collector(collector) { }
3762 
3763   void set_task(CMSConcMarkingTask* task) {
3764     _task = task;
3765   }
3766 };
3767 
3768 class CMSConcMarkingTerminatorTerminator: public TerminatorTerminator {
3769   CMSConcMarkingTask* _task;
3770  public:
3771   bool should_exit_termination();
3772   void set_task(CMSConcMarkingTask* task) {
3773     _task = task;
3774   }
3775 };
3776 
3777 // MT Concurrent Marking Task
3778 class CMSConcMarkingTask: public YieldingFlexibleGangTask {
3779   CMSCollector* _collector;
3780   int           _n_workers;                  // requested/desired # workers
3781   bool          _asynch;
3782   bool          _result;
3783   CompactibleFreeListSpace*  _cms_space;
3784   CompactibleFreeListSpace* _perm_space;
3785   char          _pad_front[64];   // padding to ...
3786   HeapWord*     _global_finger;   // ... avoid sharing cache line
3787   char          _pad_back[64];
3788   HeapWord*     _restart_addr;
3789 
3790   //  Exposed here for yielding support
3791   Mutex* const _bit_map_lock;
3792 
3793   // The per thread work queues, available here for stealing
3794   OopTaskQueueSet*  _task_queues;
3795 
3796   // Termination (and yielding) support
3797   CMSConcMarkingTerminator _term;
3798   CMSConcMarkingTerminatorTerminator _term_term;
3799 
3800  public:
3801   CMSConcMarkingTask(CMSCollector* collector,
3802                  CompactibleFreeListSpace* cms_space,
3803                  CompactibleFreeListSpace* perm_space,
3804                  bool asynch,
3805                  YieldingFlexibleWorkGang* workers,
3806                  OopTaskQueueSet* task_queues):
3807     YieldingFlexibleGangTask("Concurrent marking done multi-threaded"),
3808     _collector(collector),
3809     _cms_space(cms_space),
3810     _perm_space(perm_space),
3811     _asynch(asynch), _n_workers(0), _result(true),
3812     _task_queues(task_queues),
3813     _term(_n_workers, task_queues, _collector),
3814     _bit_map_lock(collector->bitMapLock())
3815   {
3816     _requested_size = _n_workers;
3817     _term.set_task(this);
3818     _term_term.set_task(this);
3819     assert(_cms_space->bottom() < _perm_space->bottom(),
3820            "Finger incorrectly initialized below");
3821     _restart_addr = _global_finger = _cms_space->bottom();
3822   }
3823 
3824 
3825   OopTaskQueueSet* task_queues()  { return _task_queues; }
3826 
3827   OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
3828 
3829   HeapWord** global_finger_addr() { return &_global_finger; }
3830 
3831   CMSConcMarkingTerminator* terminator() { return &_term; }
3832 
3833   virtual void set_for_termination(int active_workers) {
3834     terminator()->reset_for_reuse(active_workers);
3835   }
3836 
3837   void work(uint worker_id);
3838   bool should_yield() {
3839     return    ConcurrentMarkSweepThread::should_yield()
3840            && !_collector->foregroundGCIsActive()
3841            && _asynch;
3842   }
3843 
3844   virtual void coordinator_yield();  // stuff done by coordinator
3845   bool result() { return _result; }
3846 
3847   void reset(HeapWord* ra) {
3848     assert(_global_finger >= _cms_space->end(),  "Postcondition of ::work(i)");
3849     assert(_global_finger >= _perm_space->end(), "Postcondition of ::work(i)");
3850     assert(ra             <  _perm_space->end(), "ra too large");
3851     _restart_addr = _global_finger = ra;
3852     _term.reset_for_reuse();
3853   }
3854 
3855   static bool get_work_from_overflow_stack(CMSMarkStack* ovflw_stk,
3856                                            OopTaskQueue* work_q);
3857 
3858  private:
3859   void do_scan_and_mark(int i, CompactibleFreeListSpace* sp);
3860   void do_work_steal(int i);
3861   void bump_global_finger(HeapWord* f);
3862 };
3863 
3864 bool CMSConcMarkingTerminatorTerminator::should_exit_termination() {
3865   assert(_task != NULL, "Error");
3866   return _task->yielding();
3867   // Note that we do not need the disjunct || _task->should_yield() above
3868   // because we want terminating threads to yield only if the task
3869   // is already in the midst of yielding, which happens only after at least one
3870   // thread has yielded.
3871 }
3872 
3873 void CMSConcMarkingTerminator::yield() {
3874   if (_task->should_yield()) {
3875     _task->yield();
3876   } else {
3877     ParallelTaskTerminator::yield();
3878   }
3879 }
3880 
3881 ////////////////////////////////////////////////////////////////
3882 // Concurrent Marking Algorithm Sketch
3883 ////////////////////////////////////////////////////////////////
3884 // Until all tasks exhausted (both spaces):
3885 // -- claim next available chunk
3886 // -- bump global finger via CAS
3887 // -- find first object that starts in this chunk
3888 //    and start scanning bitmap from that position
3889 // -- scan marked objects for oops
3890 // -- CAS-mark target, and if successful:
3891 //    . if target oop is above global finger (volatile read)
3892 //      nothing to do
3893 //    . if target oop is in chunk and above local finger
3894 //        then nothing to do
3895 //    . else push on work-queue
3896 // -- Deal with possible overflow issues:
3897 //    . local work-queue overflow causes stuff to be pushed on
3898 //      global (common) overflow queue
3899 //    . always first empty local work queue
3900 //    . then get a batch of oops from global work queue if any
3901 //    . then do work stealing
3902 // -- When all tasks claimed (both spaces)
3903 //    and local work queue empty,
3904 //    then in a loop do:
3905 //    . check global overflow stack; steal a batch of oops and trace
//    . try to steal from other threads if the global overflow stack is empty
3907 //    . if neither is available, offer termination
3908 // -- Terminate and return result
3909 //
3910 void CMSConcMarkingTask::work(uint worker_id) {
3911   elapsedTimer _timer;
3912   ResourceMark rm;
3913   HandleMark hm;
3914 
3915   DEBUG_ONLY(_collector->verify_overflow_empty();)
3916 
3917   // Before we begin work, our work queue should be empty
3918   assert(work_queue(worker_id)->size() == 0, "Expected to be empty");
3919   // Scan the bitmap covering _cms_space, tracing through grey objects.
3920   _timer.start();
3921   do_scan_and_mark(worker_id, _cms_space);
3922   _timer.stop();
3923   if (PrintCMSStatistics != 0) {
3924     gclog_or_tty->print_cr("Finished cms space scanning in %dth thread: %3.3f sec",
3925       worker_id, _timer.seconds());
3926       // XXX: need xxx/xxx type of notation, two timers
3927   }
3928 
3929   // ... do the same for the _perm_space
3930   _timer.reset();
3931   _timer.start();
3932   do_scan_and_mark(worker_id, _perm_space);
3933   _timer.stop();
3934   if (PrintCMSStatistics != 0) {
3935     gclog_or_tty->print_cr("Finished perm space scanning in %dth thread: %3.3f sec",
3936       worker_id, _timer.seconds());
3937       // XXX: need xxx/xxx type of notation, two timers
3938   }
3939 
3940   // ... do work stealing
3941   _timer.reset();
3942   _timer.start();
3943   do_work_steal(worker_id);
3944   _timer.stop();
3945   if (PrintCMSStatistics != 0) {
3946     gclog_or_tty->print_cr("Finished work stealing in %dth thread: %3.3f sec",
3947       worker_id, _timer.seconds());
3948       // XXX: need xxx/xxx type of notation, two timers
3949   }
3950   assert(_collector->_markStack.isEmpty(), "Should have been emptied");
3951   assert(work_queue(worker_id)->size() == 0, "Should have been emptied");
  // Note that under the current task protocol, the
  // following assertion is true even if the spaces have
  // expanded since the completion of the concurrent
  // marking. XXX This will likely change under strict
  // ABORT semantics.
3957   assert(_global_finger >  _cms_space->end() &&
3958          _global_finger >= _perm_space->end(),
3959          "All tasks have been completed");
3960   DEBUG_ONLY(_collector->verify_overflow_empty();)
3961 }
3962 
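// Bump _global_finger monotonically up to (at least) f, using a CAS
// retry loop: a failed CAS re-reads the published value and retries
// only while f is still ahead of it, so the finger never moves
// backwards even with several concurrent bumpers racing.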
3963 void CMSConcMarkingTask::bump_global_finger(HeapWord* f) {
3964   HeapWord* read = _global_finger;
3965   HeapWord* cur  = read;
3966   while (f > read) {
3967     cur = read;
3968     read = (HeapWord*) Atomic::cmpxchg_ptr(f, &_global_finger, cur);
3969     if (cur == read) {
3970       // our cas succeeded
3971       assert(_global_finger >= f, "protocol consistency");
3972       break;
3973     }
3974   }
3975 }
3976 
3977 // This is really inefficient, and should be redone by
3978 // using (not yet available) block-read and -write interfaces to the
3979 // stack and the work_queue. XXX FIX ME !!!
3980 bool CMSConcMarkingTask::get_work_from_overflow_stack(CMSMarkStack* ovflw_stk,
3981                                                       OopTaskQueue* work_q) {
3982   // Fast lock-free check
3983   if (ovflw_stk->length() == 0) {
3984     return false;
3985   }
3986   assert(work_q->size() == 0, "Shouldn't steal");
3987   MutexLockerEx ml(ovflw_stk->par_lock(),
3988                    Mutex::_no_safepoint_check_flag);
3989   // Grab up to 1/4 the size of the work queue
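  // (For example, if the queue is empty with max_elems == 16384 and
  // ParGCDesiredObjsFromOverflowList is 20, we would transfer
  // MIN2(16384/4, 20) == 20 oops -- or fewer, if the overflow
  // stack itself holds fewer than that.)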
3990   size_t num = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
3991                     (size_t)ParGCDesiredObjsFromOverflowList);
3992   num = MIN2(num, ovflw_stk->length());
3993   for (int i = (int) num; i > 0; i--) {
3994     oop cur = ovflw_stk->pop();
3995     assert(cur != NULL, "Counted wrong?");
3996     work_q->push(cur);
3997   }
3998   return num > 0;
3999 }
4000 
4001 void CMSConcMarkingTask::do_scan_and_mark(int i, CompactibleFreeListSpace* sp) {
4002   SequentialSubTasksDone* pst = sp->conc_par_seq_tasks();
4003   int n_tasks = pst->n_tasks();
4004   // We allow that there may be no tasks to do here because
4005   // we are restarting after a stack overflow.
4006   assert(pst->valid() || n_tasks == 0, "Uninitialized use?");
4007   uint nth_task = 0;
4008 
4009   HeapWord* aligned_start = sp->bottom();
4010   if (sp->used_region().contains(_restart_addr)) {
4011     // Align down to a card boundary for the start of 0th task
4012     // for this space.
4013     aligned_start =
4014       (HeapWord*)align_size_down((uintptr_t)_restart_addr,
4015                                  CardTableModRefBS::card_size);
4016   }
4017 
4018   size_t chunk_size = sp->marking_task_size();
4019   while (!pst->is_task_claimed(/* reference */ nth_task)) {
4020     // Having claimed the nth task in this space,
4021     // compute the chunk that it corresponds to:
4022     MemRegion span = MemRegion(aligned_start + nth_task*chunk_size,
4023                                aligned_start + (nth_task+1)*chunk_size);
4024     // Try and bump the global finger via a CAS;
4025     // note that we need to do the global finger bump
4026     // _before_ taking the intersection below, because
4027     // the task corresponding to that region will be
4028     // deemed done even if the used_region() expands
4029     // because of allocation -- as it almost certainly will
4030     // during start-up while the threads yield in the
4031     // closure below.
4032     HeapWord* finger = span.end();
4033     bump_global_finger(finger);   // atomically
4034     // There are null tasks here corresponding to chunks
4035     // beyond the "top" address of the space.
4036     span = span.intersection(sp->used_region());
4037     if (!span.is_empty()) {  // Non-null task
4038       HeapWord* prev_obj;
4039       assert(!span.contains(_restart_addr) || nth_task == 0,
4040              "Inconsistency");
4041       if (nth_task == 0) {
4042         // For the 0th task, we'll not need to compute a block_start.
4043         if (span.contains(_restart_addr)) {
4044           // In the case of a restart because of stack overflow,
4045           // we might additionally skip a chunk prefix.
4046           prev_obj = _restart_addr;
4047         } else {
4048           prev_obj = span.start();
4049         }
4050       } else {
4051         // We want to skip the first object because
4052         // the protocol is to scan any object in its entirety
4053         // that _starts_ in this span; a fortiori, any
4054         // object starting in an earlier span is scanned
4055         // as part of an earlier claimed task.
4056         // Below we use the "careful" version of block_start
4057         // so we do not try to navigate uninitialized objects.
4058         prev_obj = sp->block_start_careful(span.start());
4059         // Below we use a variant of block_size that uses the
4060         // Printezis bits to avoid waiting for allocated
4061         // objects to become initialized/parsable.
4062         while (prev_obj < span.start()) {
4063           size_t sz = sp->block_size_no_stall(prev_obj, _collector);
4064           if (sz > 0) {
4065             prev_obj += sz;
4066           } else {
4067             // In this case we may end up doing a bit of redundant
4068             // scanning, but that appears unavoidable, short of
4069             // locking the free list locks; see bug 6324141.
4070             break;
4071           }
4072         }
4073       }
4074       if (prev_obj < span.end()) {
4075         MemRegion my_span = MemRegion(prev_obj, span.end());
4076         // Do the marking work within a non-empty span --
4077         // the last argument to the constructor indicates whether the
4078         // iteration should be incremental with periodic yields.
4079         Par_MarkFromRootsClosure cl(this, _collector, my_span,
4080                                     &_collector->_markBitMap,
4081                                     work_queue(i),
4082                                     &_collector->_markStack,
4083                                     &_collector->_revisitStack,
4084                                     _asynch);
4085         _collector->_markBitMap.iterate(&cl, my_span.start(), my_span.end());
4086       } // else nothing to do for this task
4087     }   // else nothing to do for this task
4088   }
4089   // We'd be tempted to assert here that since there are no
4090   // more tasks left to claim in this space, the global_finger
4091   // must exceed space->top() and a fortiori space->end(). However,
4092   // that would not quite be correct because the bumping of
4093   // global_finger occurs strictly after the claiming of a task,
4094   // so by the time we reach here the global finger may not yet
4095   // have been bumped up by the thread that claimed the last
4096   // task.
4097   pst->all_tasks_completed();
4098 }
4099 
4100 class Par_ConcMarkingClosure: public Par_KlassRememberingOopClosure {
4101  private:
4102   CMSConcMarkingTask* _task;
4103   MemRegion     _span;
4104   CMSBitMap*    _bit_map;
4105   CMSMarkStack* _overflow_stack;
4106   OopTaskQueue* _work_queue;
4107  protected:
4108   DO_OOP_WORK_DEFN
4109  public:
4110   Par_ConcMarkingClosure(CMSCollector* collector, CMSConcMarkingTask* task, OopTaskQueue* work_queue,
4111                          CMSBitMap* bit_map, CMSMarkStack* overflow_stack,
4112                          CMSMarkStack* revisit_stack):
4113     Par_KlassRememberingOopClosure(collector, collector->ref_processor(), revisit_stack),
4114     _task(task),
4115     _span(collector->_span),
4116     _work_queue(work_queue),
4117     _bit_map(bit_map),
4118     _overflow_stack(overflow_stack)
4119   { }
4120   virtual void do_oop(oop* p);
4121   virtual void do_oop(narrowOop* p);
4122   void trim_queue(size_t max);
4123   void handle_stack_overflow(HeapWord* lost);
4124   void do_yield_check() {
4125     if (_task->should_yield()) {
4126       _task->yield();
4127     }
4128   }
4129 };
4130 
4131 // Grey object scanning during work stealing phase --
4132 // the salient assumption here is that any references
4133 // that are in these stolen objects being scanned must
4134 // already have been initialized (else they would not have
4135 // been published), so we do not need to check for
4136 // uninitialized objects before pushing here.
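// Pushing a newly-greyed object first tries the worker's local work
// queue; if that is full, it falls back to the shared overflow stack;
// if even the par_push on the overflow stack fails, we declare a
// (benign) stack overflow and record the lost address so that marking
// can later restart from it -- see handle_stack_overflow() below.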
4137 void Par_ConcMarkingClosure::do_oop(oop obj) {
4138   assert(obj->is_oop_or_null(true), "expected an oop or NULL");
4139   HeapWord* addr = (HeapWord*)obj;
4140   // Check if oop points into the CMS generation
4141   // and is not marked
4142   if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
4143     // a white object ...
4144     // If we manage to "claim" the object, by being the
4145     // first thread to mark it, then we push it on our
4146     // marking stack
4147     if (_bit_map->par_mark(addr)) {     // ... now grey
4148       // push on work queue (grey set)
4149       bool simulate_overflow = false;
4150       NOT_PRODUCT(
4151         if (CMSMarkStackOverflowALot &&
4152             _collector->simulate_overflow()) {
4153           // simulate a stack overflow
4154           simulate_overflow = true;
4155         }
4156       )
4157       if (simulate_overflow ||
4158           !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
4159         // stack overflow
4160         if (PrintCMSStatistics != 0) {
4161           gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
4162                                  SIZE_FORMAT, _overflow_stack->capacity());
4163         }
4164         // We cannot assert that the overflow stack is full because
4165         // it may have been emptied since.
4166         assert(simulate_overflow ||
4167                _work_queue->size() == _work_queue->max_elems(),
4168               "Else push should have succeeded");
4169         handle_stack_overflow(addr);
4170       }
4171     } // Else, some other thread got there first
4172     do_yield_check();
4173   }
4174 }
4175 
4176 void Par_ConcMarkingClosure::do_oop(oop* p)       { Par_ConcMarkingClosure::do_oop_work(p); }
4177 void Par_ConcMarkingClosure::do_oop(narrowOop* p) { Par_ConcMarkingClosure::do_oop_work(p); }
4178 
4179 void Par_ConcMarkingClosure::trim_queue(size_t max) {
4180   while (_work_queue->size() > max) {
4181     oop new_oop;
4182     if (_work_queue->pop_local(new_oop)) {
4183       assert(new_oop->is_oop(), "Should be an oop");
4184       assert(_bit_map->isMarked((HeapWord*)new_oop), "Grey object");
4185       assert(_span.contains((HeapWord*)new_oop), "Not in span");
4186       assert(new_oop->is_parsable(), "Should be parsable");
4187       new_oop->oop_iterate(this);  // do_oop() above
4188       do_yield_check();
4189     }
4190   }
4191 }
4192 
4193 // Upon stack overflow, we discard (part of) the stack,
4194 // remembering the least address amongst those discarded
4195 // in CMSCollector's _restart_address.
4196 void Par_ConcMarkingClosure::handle_stack_overflow(HeapWord* lost) {
4197   // We need to do this under a mutex to prevent other
4198   // workers from interfering with the work done below.
4199   MutexLockerEx ml(_overflow_stack->par_lock(),
4200                    Mutex::_no_safepoint_check_flag);
4201   // Remember the least grey address discarded
4202   HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost);
4203   _collector->lower_restart_addr(ra);
4204   _overflow_stack->reset();  // discard stack contents
4205   _overflow_stack->expand(); // expand the stack if possible
4206 }
4207 
4208 
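// Steal-and-drain loop: each worker first drains its own work queue,
// then refills it from the shared overflow stack, then tries to steal
// from a peer's queue; only when all three come up empty does it
// offer termination (which itself exits early if the task is
// yielding -- see CMSConcMarkingTerminatorTerminator above).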
4209 void CMSConcMarkingTask::do_work_steal(int i) {
4210   OopTaskQueue* work_q = work_queue(i);
4211   oop obj_to_scan;
4212   CMSBitMap* bm = &(_collector->_markBitMap);
4213   CMSMarkStack* ovflw = &(_collector->_markStack);
4214   CMSMarkStack* revisit = &(_collector->_revisitStack);
4215   int* seed = _collector->hash_seed(i);
4216   Par_ConcMarkingClosure cl(_collector, this, work_q, bm, ovflw, revisit);
4217   while (true) {
4218     cl.trim_queue(0);
4219     assert(work_q->size() == 0, "Should have been emptied above");
4220     if (get_work_from_overflow_stack(ovflw, work_q)) {
4221       // Can't assert below because the work obtained from the
4222       // overflow stack may already have been stolen from us.
4223       // assert(work_q->size() > 0, "Work from overflow stack");
4224       continue;
4225     } else if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
4226       assert(obj_to_scan->is_oop(), "Should be an oop");
4227       assert(bm->isMarked((HeapWord*)obj_to_scan), "Grey object");
4228       obj_to_scan->oop_iterate(&cl);
4229     } else if (terminator()->offer_termination(&_term_term)) {
4230       assert(work_q->size() == 0, "Impossible!");
4231       break;
4232     } else if (yielding() || should_yield()) {
4233       yield();
4234     }
4235   }
4236 }
4237 
4238 // This is run by the CMS (coordinator) thread.
4239 void CMSConcMarkingTask::coordinator_yield() {
4240   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
4241          "CMS thread should hold CMS token");
4242   DEBUG_ONLY(RememberKlassesChecker mux(false);)
4243   // First give up the locks, then yield, then re-lock
4244   // We should probably use a constructor/destructor idiom to
4245   // do this unlock/lock or modify the MutexUnlocker class to
4246   // serve our purpose. XXX
4247   assert_lock_strong(_bit_map_lock);
4248   _bit_map_lock->unlock();
4249   ConcurrentMarkSweepThread::desynchronize(true);
4250   ConcurrentMarkSweepThread::acknowledge_yield_request();
4251   _collector->stopTimer();
4252   if (PrintCMSStatistics != 0) {
4253     _collector->incrementYields();
4254   }
4255   _collector->icms_wait();
4256 
4257   // It is possible for whichever thread initiated the yield request
4258   // not to get a chance to wake up and take the bitmap lock between
4259   // this thread releasing it and reacquiring it. So, while the
4260   // should_yield() flag is on, let's sleep for a bit to give the
4261   // other thread a chance to wake up. The limit imposed on the number
  // of iterations is defensive, to avoid any unforeseen circumstances
  // putting us into an infinite loop. Since it's always been this
  // (coordinator_yield()) method that was observed to cause the
  // problem, we are using a parameter (CMSCoordinatorYieldSleepCount)
  // which is by default non-zero. The other seven methods that
  // also perform the yield operation use a different parameter
  // (CMSYieldSleepCount), which is by default zero. This way we
  // can enable the sleeping for those methods too, if necessary.
4270   // See 6442774.
4271   //
4272   // We really need to reconsider the synchronization between the GC
4273   // thread and the yield-requesting threads in the future and we
4274   // should really use wait/notify, which is the recommended
4275   // way of doing this type of interaction. Additionally, we should
  // consolidate the eight methods that do the yield operation, which
  // are almost identical, into one for better maintainability and
  // readability. See 6445193.
4279   //
4280   // Tony 2006.06.29
4281   for (unsigned i = 0; i < CMSCoordinatorYieldSleepCount &&
4282                    ConcurrentMarkSweepThread::should_yield() &&
4283                    !CMSCollector::foregroundGCIsActive(); ++i) {
4284     os::sleep(Thread::current(), 1, false);
4285     ConcurrentMarkSweepThread::acknowledge_yield_request();
4286   }
4287 
4288   ConcurrentMarkSweepThread::synchronize(true);
4289   _bit_map_lock->lock_without_safepoint_check();
4290   _collector->startTimer();
4291 }
4292 
4293 bool CMSCollector::do_marking_mt(bool asynch) {
4294   assert(ConcGCThreads > 0 && conc_workers() != NULL, "precondition");
4295   int num_workers = AdaptiveSizePolicy::calc_active_conc_workers(
4296                                        conc_workers()->total_workers(),
4297                                        conc_workers()->active_workers(),
4298                                        Threads::number_of_non_daemon_threads());
4299   conc_workers()->set_active_workers(num_workers);
4300 
4301   CompactibleFreeListSpace* cms_space  = _cmsGen->cmsSpace();
4302   CompactibleFreeListSpace* perm_space = _permGen->cmsSpace();
4303 
4304   CMSConcMarkingTask tsk(this,
4305                          cms_space,
4306                          perm_space,
4307                          asynch,
4308                          conc_workers(),
4309                          task_queues());
4310 
  // Since the actual number of workers we get may be different
  // from the number we requested above, do we need to do anything
  // different below? In particular, maybe we need to subclass the
  // SequentialSubTasksDone class? XXX
4315   cms_space ->initialize_sequential_subtasks_for_marking(num_workers);
4316   perm_space->initialize_sequential_subtasks_for_marking(num_workers);
4317 
4318   // Refs discovery is already non-atomic.
4319   assert(!ref_processor()->discovery_is_atomic(), "Should be non-atomic");
4320   assert(ref_processor()->discovery_is_mt(), "Discovery should be MT");
4321   DEBUG_ONLY(RememberKlassesChecker cmx(should_unload_classes());)
4322   conc_workers()->start_task(&tsk);
4323   while (tsk.yielded()) {
4324     tsk.coordinator_yield();
4325     conc_workers()->continue_task(&tsk);
4326   }
4327   // If the task was aborted, _restart_addr will be non-NULL
4328   assert(tsk.completed() || _restart_addr != NULL, "Inconsistency");
4329   while (_restart_addr != NULL) {
4330     // XXX For now we do not make use of ABORTED state and have not
4331     // yet implemented the right abort semantics (even in the original
4332     // single-threaded CMS case). That needs some more investigation
4333     // and is deferred for now; see CR# TBF. 07252005YSR. XXX
4334     assert(!CMSAbortSemantics || tsk.aborted(), "Inconsistency");
4335     // If _restart_addr is non-NULL, a marking stack overflow
4336     // occurred; we need to do a fresh marking iteration from the
4337     // indicated restart address.
4338     if (_foregroundGCIsActive && asynch) {
4339       // We may be running into repeated stack overflows, having
4340       // reached the limit of the stack size, while making very
4341       // slow forward progress. It may be best to bail out and
4342       // let the foreground collector do its job.
4343       // Clear _restart_addr, so that foreground GC
4344       // works from scratch. This avoids the headache of
4345       // a "rescan" which would otherwise be needed because
4346       // of the dirty mod union table & card table.
4347       _restart_addr = NULL;
4348       return false;
4349     }
4350     // Adjust the task to restart from _restart_addr
4351     tsk.reset(_restart_addr);
4352     cms_space ->initialize_sequential_subtasks_for_marking(num_workers,
4353                   _restart_addr);
4354     perm_space->initialize_sequential_subtasks_for_marking(num_workers,
4355                   _restart_addr);
4356     _restart_addr = NULL;
4357     // Get the workers going again
4358     conc_workers()->start_task(&tsk);
4359     while (tsk.yielded()) {
4360       tsk.coordinator_yield();
4361       conc_workers()->continue_task(&tsk);
4362     }
4363   }
4364   assert(tsk.completed(), "Inconsistency");
4365   assert(tsk.result() == true, "Inconsistency");
4366   return true;
4367 }
4368 
4369 bool CMSCollector::do_marking_st(bool asynch) {
4370   ResourceMark rm;
4371   HandleMark   hm;
4372 
4373   // Temporarily make refs discovery single threaded (non-MT)
4374   ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false);
4375   MarkFromRootsClosure markFromRootsClosure(this, _span, &_markBitMap,
4376     &_markStack, &_revisitStack, CMSYield && asynch);
4377   // the last argument to iterate indicates whether the iteration
4378   // should be incremental with periodic yields.
4379   _markBitMap.iterate(&markFromRootsClosure);
4380   // If _restart_addr is non-NULL, a marking stack overflow
4381   // occurred; we need to do a fresh iteration from the
4382   // indicated restart address.
4383   while (_restart_addr != NULL) {
4384     if (_foregroundGCIsActive && asynch) {
4385       // We may be running into repeated stack overflows, having
4386       // reached the limit of the stack size, while making very
4387       // slow forward progress. It may be best to bail out and
4388       // let the foreground collector do its job.
4389       // Clear _restart_addr, so that foreground GC
4390       // works from scratch. This avoids the headache of
4391       // a "rescan" which would otherwise be needed because
4392       // of the dirty mod union table & card table.
4393       _restart_addr = NULL;
4394       return false;  // indicating failure to complete marking
4395     }
4396     // Deal with stack overflow:
4397     // we restart marking from _restart_addr
4398     HeapWord* ra = _restart_addr;
4399     markFromRootsClosure.reset(ra);
4400     _restart_addr = NULL;
4401     _markBitMap.iterate(&markFromRootsClosure, ra, _span.end());
4402   }
4403   return true;
4404 }
4405 
4406 void CMSCollector::preclean() {
4407   check_correct_thread_executing();
4408   assert(Thread::current()->is_ConcurrentGC_thread(), "Wrong thread");
4409   verify_work_stacks_empty();
4410   verify_overflow_empty();
4411   _abort_preclean = false;
4412   if (CMSPrecleaningEnabled) {
4413     _eden_chunk_index = 0;
4414     size_t used = get_eden_used();
4415     size_t capacity = get_eden_capacity();
4416     // Don't start sampling unless we will get sufficiently
4417     // many samples.
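    // (For example, if CMSScheduleRemarkSamplingRatio is 5 and
    // CMSScheduleRemarkEdenPenetration is 50, sampling starts only
    // while Eden occupancy is below capacity/500 * 50, i.e. below
    // 10% of capacity, leaving ample headroom before the
    // CMSScheduleRemarkEdenPenetration trigger point.)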
4418     if (used < (capacity/(CMSScheduleRemarkSamplingRatio * 100)
4419                 * CMSScheduleRemarkEdenPenetration)) {
4420       _start_sampling = true;
4421     } else {
4422       _start_sampling = false;
4423     }
4424     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
4425     CMSPhaseAccounting pa(this, "preclean", !PrintGCDetails);
4426     preclean_work(CMSPrecleanRefLists1, CMSPrecleanSurvivors1);
4427   }
4428   CMSTokenSync x(true); // is cms thread
4429   if (CMSPrecleaningEnabled) {
4430     sample_eden();
4431     _collectorState = AbortablePreclean;
4432   } else {
4433     _collectorState = FinalMarking;
4434   }
4435   verify_work_stacks_empty();
4436   verify_overflow_empty();
4437 }
4438 
4439 // Try and schedule the remark such that young gen
4440 // occupancy is CMSScheduleRemarkEdenPenetration %.
4441 void CMSCollector::abortable_preclean() {
4442   check_correct_thread_executing();
4443   assert(CMSPrecleaningEnabled,  "Inconsistent control state");
4444   assert(_collectorState == AbortablePreclean, "Inconsistent control state");
4445 
4446   // If Eden's current occupancy is below this threshold,
4447   // immediately schedule the remark; else preclean
4448   // past the next scavenge in an effort to
  // schedule the pause as described above. By choosing
4450   // CMSScheduleRemarkEdenSizeThreshold >= max eden size
4451   // we will never do an actual abortable preclean cycle.
4452   if (get_eden_used() > CMSScheduleRemarkEdenSizeThreshold) {
4453     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
4454     CMSPhaseAccounting pa(this, "abortable-preclean", !PrintGCDetails);
4455     // We need more smarts in the abortable preclean
4456     // loop below to deal with cases where allocation
4457     // in young gen is very very slow, and our precleaning
4458     // is running a losing race against a horde of
4459     // mutators intent on flooding us with CMS updates
4460     // (dirty cards).
4461     // One, admittedly dumb, strategy is to give up
4462     // after a certain number of abortable precleaning loops
4463     // or after a certain maximum time. We want to make
4464     // this smarter in the next iteration.
4465     // XXX FIX ME!!! YSR
4466     size_t loops = 0, workdone = 0, cumworkdone = 0, waited = 0;
4467     while (!(should_abort_preclean() ||
4468              ConcurrentMarkSweepThread::should_terminate())) {
4469       workdone = preclean_work(CMSPrecleanRefLists2, CMSPrecleanSurvivors2);
4470       cumworkdone += workdone;
4471       loops++;
4472       // Voluntarily terminate abortable preclean phase if we have
4473       // been at it for too long.
4474       if ((CMSMaxAbortablePrecleanLoops != 0) &&
4475           loops >= CMSMaxAbortablePrecleanLoops) {
4476         if (PrintGCDetails) {
4477           gclog_or_tty->print(" CMS: abort preclean due to loops ");
4478         }
4479         break;
4480       }
4481       if (pa.wallclock_millis() > CMSMaxAbortablePrecleanTime) {
4482         if (PrintGCDetails) {
4483           gclog_or_tty->print(" CMS: abort preclean due to time ");
4484         }
4485         break;
4486       }
4487       // If we are doing little work each iteration, we should
4488       // take a short break.
4489       if (workdone < CMSAbortablePrecleanMinWorkPerIteration) {
4490         // Sleep for some time, waiting for work to accumulate
4491         stopTimer();
4492         cmsThread()->wait_on_cms_lock(CMSAbortablePrecleanWaitMillis);
4493         startTimer();
4494         waited++;
4495       }
4496     }
4497     if (PrintCMSStatistics > 0) {
      gclog_or_tty->print(" ["SIZE_FORMAT" iterations, "SIZE_FORMAT" waits, "SIZE_FORMAT" cards] ",
                          loops, waited, cumworkdone);
4500     }
4501   }
4502   CMSTokenSync x(true); // is cms thread
4503   if (_collectorState != Idling) {
4504     assert(_collectorState == AbortablePreclean,
4505            "Spontaneous state transition?");
4506     _collectorState = FinalMarking;
4507   } // Else, a foreground collection completed this CMS cycle.
4508   return;
4509 }
4510 
4511 // Respond to an Eden sampling opportunity
4512 void CMSCollector::sample_eden() {
4513   // Make sure a young gc cannot sneak in between our
4514   // reading and recording of a sample.
4515   assert(Thread::current()->is_ConcurrentGC_thread(),
4516          "Only the cms thread may collect Eden samples");
4517   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
4518          "Should collect samples while holding CMS token");
4519   if (!_start_sampling) {
4520     return;
4521   }
4522   if (_eden_chunk_array) {
4523     if (_eden_chunk_index < _eden_chunk_capacity) {
4524       _eden_chunk_array[_eden_chunk_index] = *_top_addr;   // take sample
4525       assert(_eden_chunk_array[_eden_chunk_index] <= *_end_addr,
4526              "Unexpected state of Eden");
4527       // We'd like to check that what we just sampled is an oop-start address;
4528       // however, we cannot do that here since the object may not yet have been
4529       // initialized. So we'll instead do the check when we _use_ this sample
4530       // later.
4531       if (_eden_chunk_index == 0 ||
4532           (pointer_delta(_eden_chunk_array[_eden_chunk_index],
4533                          _eden_chunk_array[_eden_chunk_index-1])
4534            >= CMSSamplingGrain)) {
4535         _eden_chunk_index++;  // commit sample
4536       }
4537     }
4538   }
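  // Abort the abortable preclean once Eden occupancy crosses
  // CMSScheduleRemarkEdenPenetration percent of capacity (e.g. once
  // Eden is half full when the penetration is 50), so that the
  // remark pause can be scheduled near the desired occupancy.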
4539   if ((_collectorState == AbortablePreclean) && !_abort_preclean) {
4540     size_t used = get_eden_used();
4541     size_t capacity = get_eden_capacity();
4542     assert(used <= capacity, "Unexpected state of Eden");
4543     if (used >  (capacity/100 * CMSScheduleRemarkEdenPenetration)) {
4544       _abort_preclean = true;
4545     }
4546   }
4547 }
4548 
4549 
4550 size_t CMSCollector::preclean_work(bool clean_refs, bool clean_survivor) {
4551   assert(_collectorState == Precleaning ||
4552          _collectorState == AbortablePreclean, "incorrect state");
4553   ResourceMark rm;
4554   HandleMark   hm;
4555 
4556   // Precleaning is currently not MT but the reference processor
4557   // may be set for MT.  Disable it temporarily here.
4558   ReferenceProcessor* rp = ref_processor();
4559   ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(rp, false);
4560 
4561   // Do one pass of scrubbing the discovered reference lists
4562   // to remove any reference objects with strongly-reachable
4563   // referents.
4564   if (clean_refs) {
4565     CMSPrecleanRefsYieldClosure yield_cl(this);
4566     assert(rp->span().equals(_span), "Spans should be equal");
4567     CMSKeepAliveClosure keep_alive(this, _span, &_markBitMap,
4568                                    &_markStack, &_revisitStack,
4569                                    true /* preclean */);
4570     CMSDrainMarkingStackClosure complete_trace(this,
4571                                    _span, &_markBitMap, &_markStack,
4572                                    &keep_alive, true /* preclean */);
4573 
4574     // We don't want this step to interfere with a young
4575     // collection because we don't want to take CPU
4576     // or memory bandwidth away from the young GC threads
4577     // (which may be as many as there are CPUs).
4578     // Note that we don't need to protect ourselves from
4579     // interference with mutators because they can't
4580     // manipulate the discovered reference lists nor affect
4581     // the computed reachability of the referents, the
4582     // only properties manipulated by the precleaning
4583     // of these reference lists.
4584     stopTimer();
4585     CMSTokenSyncWithLocks x(true /* is cms thread */,
4586                             bitMapLock());
4587     startTimer();
4588     sample_eden();
4589 
4590     // The following will yield to allow foreground
4591     // collection to proceed promptly. XXX YSR:
4592     // The code in this method may need further
4593     // tweaking for better performance and some restructuring
4594     // for cleaner interfaces.
4595     GCTimer *gc_timer = NULL; // Currently not tracing concurrent phases
4596     rp->preclean_discovered_references(
4597           rp->is_alive_non_header(), &keep_alive, &complete_trace,
4598           &yield_cl, should_unload_classes(), gc_timer);
4599   }
4600 
4601   if (clean_survivor) {  // preclean the active survivor space(s)
4602     assert(_young_gen->kind() == Generation::DefNew ||
4603            _young_gen->kind() == Generation::ParNew ||
4604            _young_gen->kind() == Generation::ASParNew,
4605          "incorrect type for cast");
4606     DefNewGeneration* dng = (DefNewGeneration*)_young_gen;
4607     PushAndMarkClosure pam_cl(this, _span, ref_processor(),
4608                              &_markBitMap, &_modUnionTable,
4609                              &_markStack, &_revisitStack,
4610                              true /* precleaning phase */);
4611     stopTimer();
4612     CMSTokenSyncWithLocks ts(true /* is cms thread */,
4613                              bitMapLock());
4614     startTimer();
4615     unsigned int before_count =
4616       GenCollectedHeap::heap()->total_collections();
4617     SurvivorSpacePrecleanClosure
4618       sss_cl(this, _span, &_markBitMap, &_markStack,
4619              &pam_cl, before_count, CMSYield);
4620     DEBUG_ONLY(RememberKlassesChecker mx(should_unload_classes());)
4621     dng->from()->object_iterate_careful(&sss_cl);
4622     dng->to()->object_iterate_careful(&sss_cl);
4623   }
4624   MarkRefsIntoAndScanClosure
4625     mrias_cl(_span, ref_processor(), &_markBitMap, &_modUnionTable,
4626              &_markStack, &_revisitStack, this, CMSYield,
4627              true /* precleaning phase */);
4628   // CAUTION: The following closure has persistent state that may need to
4629   // be reset upon a decrease in the sequence of addresses it
4630   // processes.
4631   ScanMarkedObjectsAgainCarefullyClosure
4632     smoac_cl(this, _span,
4633       &_markBitMap, &_markStack, &_revisitStack, &mrias_cl, CMSYield);
4634 
4635   // Preclean dirty cards in ModUnionTable and CardTable using
4636   // appropriate convergence criterion;
4637   // repeat CMSPrecleanIter times unless we find that
4638   // we are losing.
4639   assert(CMSPrecleanIter < 10, "CMSPrecleanIter is too large");
4640   assert(CMSPrecleanNumerator < CMSPrecleanDenominator,
4641          "Bad convergence multiplier");
4642   assert(CMSPrecleanThreshold >= 100,
4643          "Unreasonably low CMSPrecleanThreshold");
4644 
4645   size_t numIter, cumNumCards, lastNumCards, curNumCards;
4646   for (numIter = 0, cumNumCards = lastNumCards = curNumCards = 0;
4647        numIter < CMSPrecleanIter;
4648        numIter++, lastNumCards = curNumCards, cumNumCards += curNumCards) {
4649     curNumCards  = preclean_mod_union_table(_cmsGen, &smoac_cl);
4650     if (CMSPermGenPrecleaningEnabled) {
4651       curNumCards  += preclean_mod_union_table(_permGen, &smoac_cl);
4652     }
4653     if (Verbose && PrintGCDetails) {
      gclog_or_tty->print(" (modUnionTable: "SIZE_FORMAT" cards)", curNumCards);
4655     }
4656     // Either there are very few dirty cards, so re-mark
4657     // pause will be small anyway, or our pre-cleaning isn't
4658     // that much faster than the rate at which cards are being
4659     // dirtied, so we might as well stop and re-mark since
4660     // precleaning won't improve our re-mark time by much.
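    // (For example, if CMSPrecleanNumerator is 2 and
    // CMSPrecleanDenominator is 3, we stop as soon as an iteration
    // cleans more than 2/3 as many cards as the previous one did,
    // i.e. as soon as successive passes stop shrinking the dirty
    // card population by at least a third.)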
4661     if (curNumCards <= CMSPrecleanThreshold ||
4662         (numIter > 0 &&
4663          (curNumCards * CMSPrecleanDenominator >
4664          lastNumCards * CMSPrecleanNumerator))) {
4665       numIter++;
4666       cumNumCards += curNumCards;
4667       break;
4668     }
4669   }
4670   curNumCards = preclean_card_table(_cmsGen, &smoac_cl);
4671   if (CMSPermGenPrecleaningEnabled) {
4672     curNumCards += preclean_card_table(_permGen, &smoac_cl);
4673   }
4674   cumNumCards += curNumCards;
4675   if (PrintGCDetails && PrintCMSStatistics != 0) {
    gclog_or_tty->print_cr(" (cardTable: "SIZE_FORMAT" cards, re-scanned "SIZE_FORMAT" cards, "SIZE_FORMAT" iterations)",
                  curNumCards, cumNumCards, numIter);
4678   }
4679   return cumNumCards;   // as a measure of useful work done
4680 }
4681 
4682 // PRECLEANING NOTES:
4683 // Precleaning involves:
4684 // . reading the bits of the modUnionTable and clearing the set bits.
4685 // . For the cards corresponding to the set bits, we scan the
4686 //   objects on those cards. This means we need the free_list_lock
4687 //   so that we can safely iterate over the CMS space when scanning
4688 //   for oops.
4689 // . When we scan the objects, we'll be both reading and setting
4690 //   marks in the marking bit map, so we'll need the marking bit map.
4691 // . For protecting _collector_state transitions, we take the CGC_lock.
//   Note that any races in the reading of card table entries by the
//   CMS thread on the one hand, and the clearing of those entries by the
//   VM thread or the setting of those entries by the mutator threads on the
//   other, are quite benign. However, for efficiency it makes sense to keep
//   the VM thread from racing with the CMS thread while the latter is
//   transferring dirty card info to the modUnionTable. We therefore also use
//   the CGC_lock to protect the reading of the card table and the mod union
//   table by the CMS thread.
4700 // . We run concurrently with mutator updates, so scanning
//   needs to be done carefully -- we should not try to scan
4702 //   potentially uninitialized objects.
4703 //
4704 // Locking strategy: While holding the CGC_lock, we scan over and
4705 // reset a maximal dirty range of the mod union / card tables, then lock
4706 // the free_list_lock and bitmap lock to do a full marking, then
4707 // release these locks; and repeat the cycle. This allows for a
4708 // certain amount of fairness in the sharing of these locks between
4709 // the CMS collector on the one hand, and the VM thread and the
4710 // mutators on the other.
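//
// A condensed sketch of one iteration of that cycle, mirroring the
// structure of preclean_mod_union_table() below (illustrative only):
//
//   { CMSTokenSync x(true);                    // CGC_lock protocol
//     dirtyRegion = get-and-clear next maximal dirty range; }
//   { CMSTokenSyncWithLocks ts(true, freelistLock(), bitMapLock());
//     scan/mark objects on the cards of dirtyRegion; }
//   // all of these locks are dropped between iterations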
4711 
4712 // NOTE: preclean_mod_union_table() and preclean_card_table()
4713 // further below are largely identical; if you need to modify
4714 // one of these methods, please check the other method too.
4715 
4716 size_t CMSCollector::preclean_mod_union_table(
4717   ConcurrentMarkSweepGeneration* gen,
4718   ScanMarkedObjectsAgainCarefullyClosure* cl) {
4719   verify_work_stacks_empty();
4720   verify_overflow_empty();
4721 
4722   // Turn off checking for this method but turn it back on
4723   // selectively.  There are yield points in this method
4724   // but it is difficult to turn the checking off just around
4725   // the yield points.  It is simpler to selectively turn
4726   // it on.
4727   DEBUG_ONLY(RememberKlassesChecker mux(false);)
4728 
4729   // strategy: starting with the first card, accumulate contiguous
4730   // ranges of dirty cards; clear these cards, then scan the region
4731   // covered by these cards.
4732 
4733   // Since all of the MUT is committed ahead, we can just use
4734   // that, in case the generations expand while we are precleaning.
4735   // It might also be fine to just use the committed part of the
4736   // generation, but we might potentially miss cards when the
4737   // generation is rapidly expanding while we are in the midst
4738   // of precleaning.
4739   HeapWord* startAddr = gen->reserved().start();
4740   HeapWord* endAddr   = gen->reserved().end();
4741 
4742   cl->setFreelistLock(gen->freelistLock());   // needed for yielding
4743 
4744   size_t numDirtyCards, cumNumDirtyCards;
4745   HeapWord *nextAddr, *lastAddr;
4746   for (cumNumDirtyCards = numDirtyCards = 0,
4747        nextAddr = lastAddr = startAddr;
4748        nextAddr < endAddr;
4749        nextAddr = lastAddr, cumNumDirtyCards += numDirtyCards) {
4750 
4751     ResourceMark rm;
4752     HandleMark   hm;
4753 
4754     MemRegion dirtyRegion;
4755     {
4756       stopTimer();
4757       // Potential yield point
4758       CMSTokenSync ts(true);
4759       startTimer();
4760       sample_eden();
4761       // Get dirty region starting at nextOffset (inclusive),
4762       // simultaneously clearing it.
4763       dirtyRegion =
4764         _modUnionTable.getAndClearMarkedRegion(nextAddr, endAddr);
4765       assert(dirtyRegion.start() >= nextAddr,
4766              "returned region inconsistent?");
4767     }
4768     // Remember where the next search should begin.
4769     // The returned region (if non-empty) is a right open interval,
4770     // so lastOffset is obtained from the right end of that
4771     // interval.
4772     lastAddr = dirtyRegion.end();
4773     // Should do something more transparent and less hacky XXX
4774     numDirtyCards =
4775       _modUnionTable.heapWordDiffToOffsetDiff(dirtyRegion.word_size());
4776 
4777     // We'll scan the cards in the dirty region (with periodic
4778     // yields for foreground GC as needed).
4779     if (!dirtyRegion.is_empty()) {
4780       assert(numDirtyCards > 0, "consistency check");
4781       HeapWord* stop_point = NULL;
4782       stopTimer();
4783       // Potential yield point
4784       CMSTokenSyncWithLocks ts(true, gen->freelistLock(),
4785                                bitMapLock());
4786       startTimer();
4787       {
4788         verify_work_stacks_empty();
4789         verify_overflow_empty();
4790         sample_eden();
4791         DEBUG_ONLY(RememberKlassesChecker mx(should_unload_classes());)
4792         stop_point =
4793           gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
4794       }
4795       if (stop_point != NULL) {
4796         // The careful iteration stopped early either because it found an
4797         // uninitialized object, or because we were in the midst of an
4798         // "abortable preclean", which should now be aborted. Redirty
4799         // the bits corresponding to the partially-scanned or unscanned
4800         // cards. We'll either restart at the next block boundary or
4801         // abort the preclean.
4802         assert((CMSPermGenPrecleaningEnabled && (gen == _permGen)) ||
4803                (_collectorState == AbortablePreclean && should_abort_preclean()),
4804                "Unparsable objects should only be in perm gen.");
4805         _modUnionTable.mark_range(MemRegion(stop_point, dirtyRegion.end()));
4806         if (should_abort_preclean()) {
4807           break; // out of preclean loop
4808         } else {
4809           // Compute the next address at which preclean should pick up;
4810           // might need bitMapLock in order to read P-bits.
4811           lastAddr = next_card_start_after_block(stop_point);
4812         }
4813       }
4814     } else {
4815       assert(lastAddr == endAddr, "consistency check");
4816       assert(numDirtyCards == 0, "consistency check");
4817       break;
4818     }
4819   }
4820   verify_work_stacks_empty();
4821   verify_overflow_empty();
4822   return cumNumDirtyCards;
4823 }
4824 
4825 // NOTE: preclean_mod_union_table() above and preclean_card_table()
4826 // below are largely identical; if you need to modify
4827 // one of these methods, please check the other method too.
4828 
4829 size_t CMSCollector::preclean_card_table(ConcurrentMarkSweepGeneration* gen,
4830   ScanMarkedObjectsAgainCarefullyClosure* cl) {
  // strategy: it's similar to preclean_mod_union_table above, in that
4832   // we accumulate contiguous ranges of dirty cards, mark these cards
4833   // precleaned, then scan the region covered by these cards.
4834   HeapWord* endAddr   = (HeapWord*)(gen->_virtual_space.high());
4835   HeapWord* startAddr = (HeapWord*)(gen->_virtual_space.low());
4836 
4837   cl->setFreelistLock(gen->freelistLock());   // needed for yielding
4838 
4839   size_t numDirtyCards, cumNumDirtyCards;
4840   HeapWord *lastAddr, *nextAddr;
4841 
4842   for (cumNumDirtyCards = numDirtyCards = 0,
4843        nextAddr = lastAddr = startAddr;
4844        nextAddr < endAddr;
4845        nextAddr = lastAddr, cumNumDirtyCards += numDirtyCards) {
4846 
4847     ResourceMark rm;
4848     HandleMark   hm;
4849 
4850     MemRegion dirtyRegion;
4851     {
4852       // See comments in "Precleaning notes" above on why we
4853       // do this locking. XXX Could the locking overheads be
4854       // too high when dirty cards are sparse? [I don't think so.]
4855       stopTimer();
4856       CMSTokenSync x(true); // is cms thread
4857       startTimer();
4858       sample_eden();
4859       // Get and clear dirty region from card table
4860       dirtyRegion = _ct->ct_bs()->dirty_card_range_after_reset(
4861                                     MemRegion(nextAddr, endAddr),
4862                                     true,
4863                                     CardTableModRefBS::precleaned_card_val());
4864 
4865       assert(dirtyRegion.start() >= nextAddr,
4866              "returned region inconsistent?");
4867     }
4868     lastAddr = dirtyRegion.end();
4869     numDirtyCards =
4870       dirtyRegion.word_size()/CardTableModRefBS::card_size_in_words;
4871 
4872     if (!dirtyRegion.is_empty()) {
4873       stopTimer();
4874       CMSTokenSyncWithLocks ts(true, gen->freelistLock(), bitMapLock());
4875       startTimer();
4876       sample_eden();
4877       verify_work_stacks_empty();
4878       verify_overflow_empty();
4879       DEBUG_ONLY(RememberKlassesChecker mx(should_unload_classes());)
4880       HeapWord* stop_point =
4881         gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
4882       if (stop_point != NULL) {
4883         // The careful iteration stopped early because it found an
4884         // uninitialized object.  Redirty the bits corresponding to the
4885         // partially-scanned or unscanned cards, and start again at the
4886         // next block boundary.
4887         assert(CMSPermGenPrecleaningEnabled ||
4888                (_collectorState == AbortablePreclean && should_abort_preclean()),
4889                "Unparsable objects should only be in perm gen.");
4890         _ct->ct_bs()->invalidate(MemRegion(stop_point, dirtyRegion.end()));
4891         if (should_abort_preclean()) {
4892           break; // out of preclean loop
4893         } else {
4894           // Compute the next address at which preclean should pick up.
4895           lastAddr = next_card_start_after_block(stop_point);
4896         }
4897       }
4898     } else {
4899       break;
4900     }
4901   }
4902   verify_work_stacks_empty();
4903   verify_overflow_empty();
4904   return cumNumDirtyCards;
4905 }
4906 
4907 void CMSCollector::checkpointRootsFinal(bool asynch,
4908   bool clear_all_soft_refs, bool init_mark_was_synchronous) {
4909   assert(_collectorState == FinalMarking, "incorrect state transition?");
4910   check_correct_thread_executing();
4911   // world is stopped at this checkpoint
4912   assert(SafepointSynchronize::is_at_safepoint(),
4913          "world should be stopped");
4914   TraceCMSMemoryManagerStats tms(_collectorState,GenCollectedHeap::heap()->gc_cause());
4915 
4916   verify_work_stacks_empty();
4917   verify_overflow_empty();
4918 
4919   SpecializationStats::clear();
4920   if (PrintGCDetails) {
4921     gclog_or_tty->print("[YG occupancy: "SIZE_FORMAT" K ("SIZE_FORMAT" K)]",
4922                         _young_gen->used() / K,
4923                         _young_gen->capacity() / K);
4924   }
4925   if (asynch) {
4926     if (CMSScavengeBeforeRemark) {
4927       GenCollectedHeap* gch = GenCollectedHeap::heap();
4928       // Temporarily set flag to false, GCH->do_collection will
4929       // expect it to be false and set to true
4930       FlagSetting fl(gch->_is_gc_active, false);
4931       NOT_PRODUCT(GCTraceTime t("Scavenge-Before-Remark",
4932         PrintGCDetails && Verbose, true, _gc_timer_cm);)
4933       int level = _cmsGen->level() - 1;
4934       if (level >= 0) {
4935         gch->do_collection(true,        // full (i.e. force, see below)
4936                            false,       // !clear_all_soft_refs
4937                            0,           // size
4938                            false,       // is_tlab
4939                            level        // max_level
4940                           );
4941       }
4942     }
4943     FreelistLocker x(this);
4944     MutexLockerEx y(bitMapLock(),
4945                     Mutex::_no_safepoint_check_flag);
4946     assert(!init_mark_was_synchronous, "but that's impossible!");
4947     checkpointRootsFinalWork(asynch, clear_all_soft_refs, false);
4948   } else {
4949     // already have all the locks
4950     checkpointRootsFinalWork(asynch, clear_all_soft_refs,
4951                              init_mark_was_synchronous);
4952   }
4953   verify_work_stacks_empty();
4954   verify_overflow_empty();
4955   SpecializationStats::print();
4956 }
4957 
4958 void CMSCollector::checkpointRootsFinalWork(bool asynch,
4959   bool clear_all_soft_refs, bool init_mark_was_synchronous) {
4960 
4961   NOT_PRODUCT(GCTraceTime tr("checkpointRootsFinalWork", PrintGCDetails, false, _gc_timer_cm);)
4962 
4963   assert(haveFreelistLocks(), "must have free list locks");
4964   assert_lock_strong(bitMapLock());
4965 
4966   if (UseAdaptiveSizePolicy) {
4967     size_policy()->checkpoint_roots_final_begin();
4968   }
4969 
4970   ResourceMark rm;
4971   HandleMark   hm;
4972 
4973   GenCollectedHeap* gch = GenCollectedHeap::heap();
4974 
4975   if (should_unload_classes()) {
4976     CodeCache::gc_prologue();
4977   }
4978   assert(haveFreelistLocks(), "must have free list locks");
4979   assert_lock_strong(bitMapLock());
4980 
4981   DEBUG_ONLY(RememberKlassesChecker fmx(should_unload_classes());)
4982   if (!init_mark_was_synchronous) {
4983     // We might assume that we need not fill TLAB's when
4984     // CMSScavengeBeforeRemark is set, because we may have just done
4985     // a scavenge which would have filled all TLAB's -- and besides
4986     // Eden would be empty. This however may not always be the case --
4987     // for instance although we asked for a scavenge, it may not have
4988     // happened because of a JNI critical section. We probably need
4989     // a policy for deciding whether we can in that case wait until
4990     // the critical section releases and then do the remark following
4991     // the scavenge, and skip it here. In the absence of that policy,
4992     // or of an indication of whether the scavenge did indeed occur,
4993     // we cannot rely on TLAB's having been filled and must do
4994     // so here just in case a scavenge did not happen.
4995     gch->ensure_parsability(false);  // fill TLAB's, but no need to retire them
4996     // Update the saved marks which may affect the root scans.
4997     gch->save_marks();
4998 
4999     {
5000       COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
5001 
5002       // Note on the role of the mod union table:
5003       // Since the marker in "markFromRoots" marks concurrently with
5004       // mutators, it is possible for some reachable objects not to have been
5005       // scanned. For instance, an only reference to an object A was
5006       // placed in object B after the marker scanned B. Unless B is rescanned,
5007       // A would be collected. Such updates to references in marked objects
5008       // are detected via the mod union table which is the set of all cards
5009       // dirtied since the first checkpoint in this GC cycle and prior to
5010       // the most recent young generation GC, minus those cleaned up by the
5011       // concurrent precleaning.
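      // For example: the marker scans B; a mutator later stores the
      // only reference to A into a field of B. The card covering that
      // field is dirtied, and its dirty state is folded into the mod
      // union table when a young collection resets the card table.
      // Rescanning B here (via the mod union table or the card table)
      // marks A live.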
5012       if (CMSParallelRemarkEnabled && CollectedHeap::use_parallel_gc_threads()) {
5013         GCTraceTime t("Rescan (parallel) ", PrintGCDetails, false, _gc_timer_cm);
5014         do_remark_parallel();
5015       } else {
5016         GCTraceTime t("Rescan (non-parallel) ", PrintGCDetails, false,
5017                     _gc_timer_cm);
5018         do_remark_non_parallel();
5019       }
5020     }
5021   } else {
5022     assert(!asynch, "Can't have init_mark_was_synchronous in asynch mode");
5023     // The initial mark was stop-world, so there's no rescanning to
5024     // do; go straight on to the next step below.
5025   }
5026   verify_work_stacks_empty();
5027   verify_overflow_empty();
5028 
5029   {
5030     NOT_PRODUCT(GCTraceTime ts("refProcessingWork", PrintGCDetails, false, _gc_timer_cm);)
5031     refProcessingWork(asynch, clear_all_soft_refs);
5032   }
5033   verify_work_stacks_empty();
5034   verify_overflow_empty();
5035 
5036   if (should_unload_classes()) {
5037     CodeCache::gc_epilogue();
5038   }
5039   JvmtiExport::gc_epilogue();
5040 
5041   // If we encountered any (marking stack / work queue) overflow
5042   // events during the current CMS cycle, take appropriate
5043   // remedial measures, where possible, so as to try and avoid
5044   // recurrence of that condition.
5045   assert(_markStack.isEmpty(), "No grey objects");
5046   size_t ser_ovflw = _ser_pmc_remark_ovflw + _ser_pmc_preclean_ovflw +
5047                      _ser_kac_ovflw        + _ser_kac_preclean_ovflw;
5048   if (ser_ovflw > 0) {
5049     if (PrintCMSStatistics != 0) {
5050       gclog_or_tty->print_cr("Marking stack overflow (benign) "
5051         "(pmc_pc="SIZE_FORMAT", pmc_rm="SIZE_FORMAT", kac="SIZE_FORMAT
5052         ", kac_preclean="SIZE_FORMAT")",
5053         _ser_pmc_preclean_ovflw, _ser_pmc_remark_ovflw,
5054         _ser_kac_ovflw, _ser_kac_preclean_ovflw);
5055     }
5056     _markStack.expand();
5057     _ser_pmc_remark_ovflw = 0;
5058     _ser_pmc_preclean_ovflw = 0;
5059     _ser_kac_preclean_ovflw = 0;
5060     _ser_kac_ovflw = 0;
5061   }
5062   if (_par_pmc_remark_ovflw > 0 || _par_kac_ovflw > 0) {
5063     if (PrintCMSStatistics != 0) {
5064       gclog_or_tty->print_cr("Work queue overflow (benign) "
5065         "(pmc_rm="SIZE_FORMAT", kac="SIZE_FORMAT")",
5066         _par_pmc_remark_ovflw, _par_kac_ovflw);
5067     }
5068     _par_pmc_remark_ovflw = 0;
5069     _par_kac_ovflw = 0;
5070   }
5071   if (PrintCMSStatistics != 0) {
5072      if (_markStack._hit_limit > 0) {
5073        gclog_or_tty->print_cr(" (benign) Hit max stack size limit ("SIZE_FORMAT")",
5074                               _markStack._hit_limit);
5075      }
5076      if (_markStack._failed_double > 0) {
5077        gclog_or_tty->print_cr(" (benign) Failed stack doubling ("SIZE_FORMAT"),"
5078                               " current capacity "SIZE_FORMAT,
5079                               _markStack._failed_double,
5080                               _markStack.capacity());
5081      }
5082   }
5083   _markStack._hit_limit = 0;
5084   _markStack._failed_double = 0;
5085 
5086   // Check that all the klasses have been revisited
5087   assert(_revisitStack.isEmpty(), "Not all klasses revisited");
5088 
5089   if ((VerifyAfterGC || VerifyDuringGC) &&
5090       GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
5091     verify_after_remark();
5092   }
5093 
5094   _gc_tracer_cm->report_object_count_after_gc(&_is_alive_closure);
5095 
5096   // Change under the freelistLocks.
5097   _collectorState = Sweeping;
5098   // Call isAllClear() under bitMapLock
5099   assert(_modUnionTable.isAllClear(), "Should be clear by end of the"
5100     " final marking");
5101   if (UseAdaptiveSizePolicy) {
5102     size_policy()->checkpoint_roots_final_end(gch->gc_cause());
5103   }
5104 }
5105 
5106 // Parallel remark task
5107 class CMSParRemarkTask: public AbstractGangTask {
5108   CMSCollector* _collector;
5109   int           _n_workers;
5110   CompactibleFreeListSpace* _cms_space;
5111   CompactibleFreeListSpace* _perm_space;
5112 
5113   // The per-thread work queues, available here for stealing.
5114   OopTaskQueueSet*       _task_queues;
5115   ParallelTaskTerminator _term;
5116 
5117  public:
5118   // A value of 0 passed to n_workers will cause the number of
5119   // workers to be taken from the active workers in the work gang.
5120   CMSParRemarkTask(CMSCollector* collector,
5121                    CompactibleFreeListSpace* cms_space,
5122                    CompactibleFreeListSpace* perm_space,
5123                    int n_workers, FlexibleWorkGang* workers,
5124                    OopTaskQueueSet* task_queues):
5125     AbstractGangTask("Rescan roots and grey objects in parallel"),
5126     _collector(collector),
5127     _cms_space(cms_space), _perm_space(perm_space),
5128     _n_workers(n_workers),
5129     _task_queues(task_queues),
5130     _term(n_workers, task_queues) { }
5131 
5132   OopTaskQueueSet* task_queues() { return _task_queues; }
5133 
5134   OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
5135 
5136   ParallelTaskTerminator* terminator() { return &_term; }
5137   int n_workers() { return _n_workers; }
5138 
5139   void work(uint worker_id);
5140 
5141  private:
5142   // Work method in support of parallel rescan ... of young gen spaces
5143   void do_young_space_rescan(int i, Par_MarkRefsIntoAndScanClosure* cl,
5144                              ContiguousSpace* space,
5145                              HeapWord** chunk_array, size_t chunk_top);
5146 
5147   // ... of dirty cards in old space
5148   void do_dirty_card_rescan_tasks(CompactibleFreeListSpace* sp, int i,
5149                                   Par_MarkRefsIntoAndScanClosure* cl);
5150 
5151   // ... work stealing for the above
5152   void do_work_steal(int i, Par_MarkRefsIntoAndScanClosure* cl, int* seed);
5153 };
5154 
5155 // work_queue(i) is passed to the closure
5156 // Par_MarkRefsIntoAndScanClosure.  The "i" parameter
5157 // also is passed to do_dirty_card_rescan_tasks() and to
5158 // do_work_steal() to select the i-th task_queue.
5159 
5160 void CMSParRemarkTask::work(uint worker_id) {
5161   elapsedTimer _timer;
5162   ResourceMark rm;
5163   HandleMark   hm;
5164 
5165   // ---------- rescan from roots --------------
5166   _timer.start();
5167   GenCollectedHeap* gch = GenCollectedHeap::heap();
5168   Par_MarkRefsIntoAndScanClosure par_mrias_cl(_collector,
5169     _collector->_span, _collector->ref_processor(),
5170     &(_collector->_markBitMap),
5171     work_queue(worker_id), &(_collector->_revisitStack));
5172 
5173   // Rescan young gen roots first since these are likely
5174   // coarsely partitioned and may, on that account, constitute
5175   // the critical path; thus, it's best to start off that
5176   // work first.
5177   // ---------- young gen roots --------------
5178   {
5179     DefNewGeneration* dng = _collector->_young_gen->as_DefNewGeneration();
5180     EdenSpace* eden_space = dng->eden();
5181     ContiguousSpace* from_space = dng->from();
5182     ContiguousSpace* to_space   = dng->to();
5183 
5184     HeapWord** eca = _collector->_eden_chunk_array;
5185     size_t     ect = _collector->_eden_chunk_index;
5186     HeapWord** sca = _collector->_survivor_chunk_array;
5187     size_t     sct = _collector->_survivor_chunk_index;
5188 
5189     assert(ect <= _collector->_eden_chunk_capacity, "out of bounds");
5190     assert(sct <= _collector->_survivor_chunk_capacity, "out of bounds");
5191 
5192     do_young_space_rescan(worker_id, &par_mrias_cl, to_space, NULL, 0);
5193     do_young_space_rescan(worker_id, &par_mrias_cl, from_space, sca, sct);
5194     do_young_space_rescan(worker_id, &par_mrias_cl, eden_space, eca, ect);
5195 
5196     _timer.stop();
5197     if (PrintCMSStatistics != 0) {
5198       gclog_or_tty->print_cr(
5199         "Finished young gen rescan work in %dth thread: %3.3f sec",
5200         worker_id, _timer.seconds());
5201     }
5202   }
5203 
5204   // ---------- remaining roots --------------
5205   _timer.reset();
5206   _timer.start();
5207   gch->gen_process_strong_roots(_collector->_cmsGen->level(),
5208                                 false,     // yg was scanned above
5209                                 false,     // this is parallel code
5210                                 true,      // collecting perm gen
5211                                 SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
5212                                 &par_mrias_cl,
5213                                 true,   // walk all of code cache if (so & SO_CodeCache)
5214                                 NULL);
5215   assert(_collector->should_unload_classes()
5216          || (_collector->CMSCollector::roots_scanning_options() & SharedHeap::SO_CodeCache),
5217          "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
5218   _timer.stop();
5219   if (PrintCMSStatistics != 0) {
5220     gclog_or_tty->print_cr(
5221       "Finished remaining root rescan work in %dth thread: %3.3f sec",
5222       worker_id, _timer.seconds());
5223   }
5224 
5225   // ---------- rescan dirty cards ------------
5226   _timer.reset();
5227   _timer.start();
5228 
5229   // Do the rescan tasks for each of the two spaces
5230   // (cms_space and perm_space) in turn.
5231   // "worker_id" is passed to select the task_queue for "worker_id"
5232   do_dirty_card_rescan_tasks(_cms_space, worker_id, &par_mrias_cl);
5233   do_dirty_card_rescan_tasks(_perm_space, worker_id, &par_mrias_cl);
5234   _timer.stop();
5235   if (PrintCMSStatistics != 0) {
5236     gclog_or_tty->print_cr(
5237       "Finished dirty card rescan work in %dth thread: %3.3f sec",
5238       worker_id, _timer.seconds());
5239   }
5240 
5241   // ---------- steal work from other threads ...
5242   // ---------- ... and drain overflow list.
5243   _timer.reset();
5244   _timer.start();
5245   do_work_steal(worker_id, &par_mrias_cl, _collector->hash_seed(worker_id));
5246   _timer.stop();
5247   if (PrintCMSStatistics != 0) {
5248     gclog_or_tty->print_cr(
5249       "Finished work stealing in %dth thread: %3.3f sec",
5250       worker_id, _timer.seconds());
5251   }
5252 }
5253 
5254 // Note that parameter "i" is not used.
5255 void
5256 CMSParRemarkTask::do_young_space_rescan(int i,
5257   Par_MarkRefsIntoAndScanClosure* cl, ContiguousSpace* space,
5258   HeapWord** chunk_array, size_t chunk_top) {
5259   // Until all tasks completed:
5260   // . claim an unclaimed task
5261   // . compute region boundaries corresponding to task claimed
5262   //   using chunk_array
5263   // . par_oop_iterate(cl) over that region
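       //
       // For example (illustrative values only): with chunk_array = {c0, c1}
       // and chunk_top == 2, the successively claimed tasks cover
       //   [bottom, c0), [c0, c1), [c1, top)
       // so every word of the space is scanned exactly once.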
5264 
5265   ResourceMark rm;
5266   HandleMark   hm;
5267 
5268   SequentialSubTasksDone* pst = space->par_seq_tasks();
5269   assert(pst->valid(), "Uninitialized use?");
5270 
5271   uint nth_task = 0;
5272   uint n_tasks  = pst->n_tasks();
5273 
5274   HeapWord *start, *end;
5275   while (!pst->is_task_claimed(/* reference */ nth_task)) {
5276     // We claimed task # nth_task; compute its boundaries.
5277     if (chunk_top == 0) {  // no samples were taken
5278       assert(nth_task == 0 && n_tasks == 1, "Can have only 1 EdenSpace task");
5279       start = space->bottom();
5280       end   = space->top();
5281     } else if (nth_task == 0) {
5282       start = space->bottom();
5283       end   = chunk_array[nth_task];
5284     } else if (nth_task < (uint)chunk_top) {
5285       assert(nth_task >= 1, "Control point invariant");
5286       start = chunk_array[nth_task - 1];
5287       end   = chunk_array[nth_task];
5288     } else {
5289       assert(nth_task == (uint)chunk_top, "Control point invariant");
5290       start = chunk_array[chunk_top - 1];
5291       end   = space->top();
5292     }
5293     MemRegion mr(start, end);
5294     // Verify that mr is in space
5295     assert(mr.is_empty() || space->used_region().contains(mr),
5296            "Should be in space");
5297     // Verify that "start" is an object boundary
5298     assert(mr.is_empty() || oop(mr.start())->is_oop(),
5299            "Should be an oop");
5300     space->par_oop_iterate(mr, cl);
5301   }
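       // Each thread retires its participation below; a sketch of the
       // intended protocol (see SequentialSubTasksDone for the details):
       // when the last of the n_threads participants has checked in,
       // the subtask state is cleared so the structure can be reused.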
5302   pst->all_tasks_completed();
5303 }
5304 
5305 void
5306 CMSParRemarkTask::do_dirty_card_rescan_tasks(
5307   CompactibleFreeListSpace* sp, int i,
5308   Par_MarkRefsIntoAndScanClosure* cl) {
5309   // Until all tasks completed:
5310   // . claim an unclaimed task
5311   // . compute region boundaries corresponding to task claimed
5312   // . transfer dirty bits ct->mut for that region
5313   // . apply rescanclosure to dirty mut bits for that region
5314 
5315   ResourceMark rm;
5316   HandleMark   hm;
5317 
5318   OopTaskQueue* work_q = work_queue(i);
5319   ModUnionClosure modUnionClosure(&(_collector->_modUnionTable));
5320   // CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION!
5321   // CAUTION: This closure has state that persists across calls to
5322   // the work method dirty_range_iterate_clear() in that it has
5323   // embedded in it a (subtype of) UpwardsObjectClosure. The
5324   // use of that state in the embedded UpwardsObjectClosure instance
5325   // assumes that the cards are always iterated (even if in parallel
5326   // by several threads) in monotonically increasing order per each
5327   // thread. This is true of the implementation below which picks
5328   // card ranges (chunks) in monotonically increasing order globally
5329   // and, a fortiori, in monotonically increasing order per thread
5330   // (the latter order being a subsequence of the former).
5331   // If the work code below is ever reorganized into a more chaotic
5332   // work-partitioning form than the current "sequential tasks"
5333   // paradigm, the use of that persistent state will have to be
5334   // revisited and modified appropriately. See also related
5335   // bug 4756801 work on which should examine this code to make
5336   // sure that the changes there do not run counter to the
5337   // assumptions made here and necessary for correctness and
5338   // efficiency. Note also that this code might yield inefficient
5339   // behaviour in the case of very large objects that span one or
5340   // more work chunks. Such objects would potentially be scanned
5341   // several times redundantly. Work on 4756801 should try to
5342   // address that performance anomaly if at all possible. XXX
5343   MemRegion  full_span  = _collector->_span;
5344   CMSBitMap* bm    = &(_collector->_markBitMap);     // shared
5345   CMSMarkStack* rs = &(_collector->_revisitStack);   // shared
5346   MarkFromDirtyCardsClosure
5347     greyRescanClosure(_collector, full_span, // entire span of interest
5348                       sp, bm, work_q, rs, cl);
5349 
5350   SequentialSubTasksDone* pst = sp->conc_par_seq_tasks();
5351   assert(pst->valid(), "Uninitialized use?");
5352   uint nth_task = 0;
5353   const int alignment = CardTableModRefBS::card_size * BitsPerWord;
5354   MemRegion span = sp->used_region();
5355   HeapWord* start_addr = span.start();
5356   HeapWord* end_addr = (HeapWord*)round_to((intptr_t)span.end(),
5357                                            alignment);
5358   const size_t chunk_size = sp->rescan_task_size(); // in HeapWord units
5359   assert((HeapWord*)round_to((intptr_t)start_addr, alignment) ==
5360          start_addr, "Check alignment");
5361   assert((size_t)round_to((intptr_t)chunk_size, alignment) ==
5362          chunk_size, "Check alignment");
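       // Illustrative arithmetic (assuming typical 512-byte cards on a
       // 64-bit VM): one mod union table word covers BitsPerWord cards,
       // i.e. 64 * 512 = 32768 bytes of heap, which is exactly the
       // alignment quantum above; so chunk boundaries never split a MUT
       // word and workers need no synchronization on the table.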
5363 
5364   while (!pst->is_task_claimed(/* reference */ nth_task)) {
5365     // Having claimed the nth_task, compute the corresponding mem-region,
5366     // which is, a fortiori, aligned correctly (i.e. at a MUT boundary).
5367     // The alignment restriction ensures that we do not need any
5368     // synchronization with other gang-workers while setting or
5369     // clearing bits in this chunk of the MUT.
5370     MemRegion this_span = MemRegion(start_addr + nth_task*chunk_size,
5371                                     start_addr + (nth_task+1)*chunk_size);
5372     // The last chunk's end might be way beyond end of the
5373     // used region. In that case pull back appropriately.
5374     if (this_span.end() > end_addr) {
5375       this_span.set_end(end_addr);
5376       assert(!this_span.is_empty(), "Program logic (calculation of n_tasks)");
5377     }
5378     // Iterate over the dirty cards covering this chunk, marking them
5379     // precleaned, and setting the corresponding bits in the mod union
5380     // table. Since we have been careful to partition at Card and MUT-word
5381     // boundaries no synchronization is needed between parallel threads.
5382     _collector->_ct->ct_bs()->dirty_card_iterate(this_span,
5383                                                  &modUnionClosure);
5384 
5385     // Having transferred these marks into the modUnionTable,
5386     // rescan the marked objects on the dirty cards in the modUnionTable.
5387     // Even if this is at a synchronous collection, the initial marking
5388     // may have been done during an asynchronous collection so there
5389     // may be dirty bits in the mod-union table.
5390     _collector->_modUnionTable.dirty_range_iterate_clear(
5391                   this_span, &greyRescanClosure);
5392     _collector->_modUnionTable.verifyNoOneBitsInRange(
5393                                  this_span.start(),
5394                                  this_span.end());
5395   }
5396   pst->all_tasks_completed();  // declare that i am done
5397 }
5398 
5399 // . see if we can share work_queues with ParNew? XXX
5400 void
5401 CMSParRemarkTask::do_work_steal(int i, Par_MarkRefsIntoAndScanClosure* cl,
5402                                 int* seed) {
5403   OopTaskQueue* work_q = work_queue(i);
5404   NOT_PRODUCT(int num_steals = 0;)
5405   oop obj_to_scan;
5406   CMSBitMap* bm = &(_collector->_markBitMap);
5407 
5408   while (true) {
5409     // Completely finish any left over work from (an) earlier round(s)
5410     cl->trim_queue(0);
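         // The computation below bounds how much we take from the overflow
         // list in one gulp. Rough illustration (numbers hypothetical):
         // with a 16K-element queue that is currently empty, the
         // quarter-headroom term permits at most 4K objects, so one taker
         // can neither swamp its own queue nor starve the other workers.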
5411     size_t num_from_overflow_list = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
5412                                          (size_t)ParGCDesiredObjsFromOverflowList);
5413     // Now check if there's any work in the overflow list
5414     // Passing ParallelGCThreads as the third parameter, no_of_gc_threads,
5415     // only affects the number of attempts made to get work from the
5416     // overflow list and does not affect the number of workers.  Just
5417     // pass ParallelGCThreads so this behavior is unchanged.
5418     if (_collector->par_take_from_overflow_list(num_from_overflow_list,
5419                                                 work_q,
5420                                                 ParallelGCThreads)) {
5421       // found something in global overflow list;
5422       // not yet ready to go stealing work from others.
5423       // We'd like to assert(work_q->size() != 0, ...)
5424       // because we just took work from the overflow list,
5425       // but of course we can't since all of that could have
5426       // been already stolen from us.
5427       // "He giveth and He taketh away."
5428       continue;
5429     }
5430     // Verify that we have no work before we resort to stealing
5431     assert(work_q->size() == 0, "Have work, shouldn't steal");
5432     // Try to steal from other queues that have work
5433     if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
5434       NOT_PRODUCT(num_steals++;)
5435       assert(obj_to_scan->is_oop(), "Oops, not an oop!");
5436       assert(bm->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?");
5437       // Do scanning work
5438       obj_to_scan->oop_iterate(cl);
5439       // Loop around, finish this work, and try to steal some more
5440     } else if (terminator()->offer_termination()) {
5441         break;  // nirvana from the infinite cycle
5442     }
5443   }
5444   NOT_PRODUCT(
5445     if (PrintCMSStatistics != 0) {
5446       gclog_or_tty->print("\n\t(%d: stole %d oops)", i, num_steals);
5447     }
5448   )
5449   assert(work_q->size() == 0 && _collector->overflow_list_is_empty(),
5450          "Else our work is not yet done");
5451 }
5452 
5453 // Return a thread-local PLAB recording array, as appropriate.
5454 void* CMSCollector::get_data_recorder(int thr_num) {
5455   if (_survivor_plab_array != NULL &&
5456       (CMSPLABRecordAlways ||
5457        (_collectorState > Marking && _collectorState < FinalMarking))) {
5458     assert(thr_num < (int)ParallelGCThreads, "thr_num is out of bounds");
5459     ChunkArray* ca = &_survivor_plab_array[thr_num];
5460     ca->reset();   // clear it so that fresh data is recorded
5461     return (void*) ca;
5462   } else {
5463     return NULL;
5464   }
5465 }
5466 
5467 // Reset all the thread-local PLAB recording arrays
5468 void CMSCollector::reset_survivor_plab_arrays() {
5469   for (uint i = 0; i < ParallelGCThreads; i++) {
5470     _survivor_plab_array[i].reset();
5471   }
5472 }
5473 
5474 // Merge the per-thread plab arrays into the global survivor chunk
5475 // array which will provide the partitioning of the survivor space
5476 // for CMS rescan.
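     // This is, in effect, a k-way merge: each per-thread plab array is
     // recorded in ascending address order, so repeatedly consuming the
     // smallest un-consumed head element (tracked by _cursor[j]) yields a
     // single sorted result. E.g. (hypothetical addresses), merging
     // {0x100, 0x400} and {0x200, 0x300} gives {0x100, 0x200, 0x300, 0x400}.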
5477 void CMSCollector::merge_survivor_plab_arrays(ContiguousSpace* surv,
5478                                               int no_of_gc_threads) {
5479   assert(_survivor_plab_array  != NULL, "Error");
5480   assert(_survivor_chunk_array != NULL, "Error");
5481   assert(_collectorState == FinalMarking, "Error");
5482   for (int j = 0; j < no_of_gc_threads; j++) {
5483     _cursor[j] = 0;
5484   }
5485   HeapWord* top = surv->top();
5486   size_t i;
5487   for (i = 0; i < _survivor_chunk_capacity; i++) {  // all sca entries
5488     HeapWord* min_val = top;          // Higher than any PLAB address
5489     uint      min_tid = 0;            // position of min_val this round
5490     for (int j = 0; j < no_of_gc_threads; j++) {
5491       ChunkArray* cur_sca = &_survivor_plab_array[j];
5492       if (_cursor[j] == cur_sca->end()) {
5493         continue;
5494       }
5495       assert(_cursor[j] < cur_sca->end(), "ctl pt invariant");
5496       HeapWord* cur_val = cur_sca->nth(_cursor[j]);
5497       assert(surv->used_region().contains(cur_val), "Out of bounds value");
5498       if (cur_val < min_val) {
5499         min_tid = j;
5500         min_val = cur_val;
5501       } else {
5502         assert(cur_val < top, "All recorded addresses should be less");
5503       }
5504     }
5505     // At this point min_val and min_tid are respectively the least
5506     // address among the _survivor_plab_array[j]->nth(_cursor[j]) entries
5507     // over all threads j, and the thread (min_tid) that recorded it.
5508     // We record this address in _survivor_chunk_array[i]
5509     // and increment _cursor[min_tid] prior to the next round i.
5510     if (min_val == top) {
5511       break;
5512     }
5513     _survivor_chunk_array[i] = min_val;
5514     _cursor[min_tid]++;
5515   }
5516   // We are all done; record the size of the _survivor_chunk_array
5517   _survivor_chunk_index = i; // exclusive: [0, i)
5518   if (PrintCMSStatistics > 0) {
5519     gclog_or_tty->print(" (Survivor: " SIZE_FORMAT " chunks) ", i);
5520   }
5521   // Verify that we used up all the recorded entries
5522   #ifdef ASSERT
5523     size_t total = 0;
5524     for (int j = 0; j < no_of_gc_threads; j++) {
5525       assert(_cursor[j] == _survivor_plab_array[j].end(), "Ctl pt invariant");
5526       total += _cursor[j];
5527     }
5528     assert(total == _survivor_chunk_index, "Ctl Pt Invariant");
5529     // Check that the merged array is in sorted order
5530     if (total > 0) {
5531       for (size_t i = 0; i < total - 1; i++) {
5532         if (PrintCMSStatistics > 0) {
5533           gclog_or_tty->print(" (chunk" SIZE_FORMAT ":" INTPTR_FORMAT ") ",
5534                               i, _survivor_chunk_array[i]);
5535         }
5536         assert(_survivor_chunk_array[i] < _survivor_chunk_array[i+1],
5537                "Not sorted");
5538       }
5539     }
5540   #endif // ASSERT
5541 }
5542 
5543 // Set up the space's par_seq_tasks structure for work claiming
5544 // for parallel rescan of young gen.
5545 // See ParRescanTask where this is currently used.
5546 void
5547 CMSCollector::
5548 initialize_sequential_subtasks_for_young_gen_rescan(int n_threads) {
5549   assert(n_threads > 0, "Unexpected n_threads argument");
5550   DefNewGeneration* dng = (DefNewGeneration*)_young_gen;
5551 
5552   // Eden space
5553   {
5554     SequentialSubTasksDone* pst = dng->eden()->par_seq_tasks();
5555     assert(!pst->valid(), "Clobbering existing data?");
5556     // The _eden_chunk_index sampled boundaries define _eden_chunk_index + 1 tasks.
5557     size_t n_tasks = _eden_chunk_index + 1;
5558     assert(n_tasks == 1 || _eden_chunk_array != NULL, "Error");
5559     // Sets the condition for completion of the subtask (how many threads
5560     // need to finish in order to be done).
5561     pst->set_n_threads(n_threads);
5562     pst->set_n_tasks((int)n_tasks);
5563   }
5564 
5565   // Merge the survivor plab arrays into _survivor_chunk_array
5566   if (_survivor_plab_array != NULL) {
5567     merge_survivor_plab_arrays(dng->from(), n_threads);
5568   } else {
5569     assert(_survivor_chunk_index == 0, "Error");
5570   }
5571 
5572   // To space
5573   {
5574     SequentialSubTasksDone* pst = dng->to()->par_seq_tasks();
5575     assert(!pst->valid(), "Clobbering existing data?");
5576     // Sets the condition for completion of the subtask (how many threads
5577     // need to finish in order to be done).
5578     pst->set_n_threads(n_threads);
5579     pst->set_n_tasks(1);
5580     assert(pst->valid(), "Error");
5581   }
5582 
5583   // From space
5584   {
5585     SequentialSubTasksDone* pst = dng->from()->par_seq_tasks();
5586     assert(!pst->valid(), "Clobbering existing data?");
5587     size_t n_tasks = _survivor_chunk_index + 1;
5588     assert(n_tasks == 1 || _survivor_chunk_array != NULL, "Error");
5589     // Sets the condition for completion of the subtask (how many threads
5590     // need to finish in order to be done).
5591     pst->set_n_threads(n_threads);
5592     pst->set_n_tasks((int)n_tasks);
5593     assert(pst->valid(), "Error");
5594   }
5595 }
5596 
5597 // Parallel version of remark
5598 void CMSCollector::do_remark_parallel() {
5599   GenCollectedHeap* gch = GenCollectedHeap::heap();
5600   FlexibleWorkGang* workers = gch->workers();
5601   assert(workers != NULL, "Need parallel worker threads.");
5602   // Choose to use the number of GC workers most recently set
5603   // into "active_workers".  If active_workers is not set, set it
5604   // to ParallelGCThreads.
5605   int n_workers = workers->active_workers();
5606   if (n_workers == 0) {
5607     assert(n_workers > 0, "Should have been set during scavenge");
5608     n_workers = ParallelGCThreads;
5609     workers->set_active_workers(n_workers);
5610   }
5611   CompactibleFreeListSpace* cms_space  = _cmsGen->cmsSpace();
5612   CompactibleFreeListSpace* perm_space = _permGen->cmsSpace();
5613 
5614   CMSParRemarkTask tsk(this,
5615     cms_space, perm_space,
5616     n_workers, workers, task_queues());
5617 
5618   // Set up for parallel process_strong_roots work.
5619   gch->set_par_threads(n_workers);
5620   // We won't be iterating over the cards in the card table updating
5621   // the younger_gen cards, so we shouldn't call the following else
5622   // the verification code as well as subsequent younger_refs_iterate
5623   // code would get confused. XXX
5624   // gch->rem_set()->prepare_for_younger_refs_iterate(true); // parallel
5625 
5626   // The young gen rescan work will not be done as part of
5627   // process_strong_roots (which currently doesn't know how to
5628   // parallelize such a scan), but rather will be broken up into
5629   // a set of parallel tasks (via the sampling that the [abortable]
5630   // preclean phase did of EdenSpace, plus the [two] tasks of
5631   // scanning the [two] survivor spaces). Further fine-grain
5632   // parallelization of the scanning of the survivor spaces
5633   // themselves, and of precleaning of the younger gen itself
5634   // is deferred to the future.
5635   initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
5636 
5637   // The dirty card rescan work is broken up into a "sequence"
5638   // of parallel tasks (per constituent space) that are dynamically
5639   // claimed by the parallel threads.
5640   cms_space->initialize_sequential_subtasks_for_rescan(n_workers);
5641   perm_space->initialize_sequential_subtasks_for_rescan(n_workers);
5642 
5643   // It turns out that even when we're using 1 thread, doing the work in a
5644   // separate thread causes wide variance in run times.  We can't help this
5645   // in the multi-threaded case, but we special-case n=1 here to get
5646   // repeatable measurements of the 1-thread overhead of the parallel code.
5647   if (n_workers > 1) {
5648     // Make refs discovery MT-safe, if it isn't already: it may not
5649     // necessarily be so, since it's possible that we are doing
5650     // ST marking.
5651     ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), true);
5652     GenCollectedHeap::StrongRootsScope srs(gch);
5653     workers->run_task(&tsk);
5654   } else {
5655     ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false);
5656     GenCollectedHeap::StrongRootsScope srs(gch);
5657     tsk.work(0);
5658   }
5659   gch->set_par_threads(0);  // 0 ==> non-parallel.
5660   // restore, single-threaded for now, any preserved marks
5661   // as a result of work_q overflow
5662   restore_preserved_marks_if_any();
5663 }
5664 
5665 // Non-parallel version of remark
5666 void CMSCollector::do_remark_non_parallel() {
5667   ResourceMark rm;
5668   HandleMark   hm;
5669   GenCollectedHeap* gch = GenCollectedHeap::heap();
5670   ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false);
5671 
5672   MarkRefsIntoAndScanClosure
5673     mrias_cl(_span, ref_processor(), &_markBitMap, &_modUnionTable,
5674              &_markStack, &_revisitStack, this,
5675              false /* should_yield */, false /* not precleaning */);
5676   MarkFromDirtyCardsClosure
5677     markFromDirtyCardsClosure(this, _span,
5678                               NULL,  // space is set further below
5679                               &_markBitMap, &_markStack, &_revisitStack,
5680                               &mrias_cl);
5681   {
5682     GCTraceTime t("grey object rescan", PrintGCDetails, false, _gc_timer_cm);
5683     // Iterate over the dirty cards, setting the corresponding bits in the
5684     // mod union table.
5685     {
5686       ModUnionClosure modUnionClosure(&_modUnionTable);
5687       _ct->ct_bs()->dirty_card_iterate(
5688                       _cmsGen->used_region(),
5689                       &modUnionClosure);
5690       _ct->ct_bs()->dirty_card_iterate(
5691                       _permGen->used_region(),
5692                       &modUnionClosure);
5693     }
5694     // Having transferred these marks into the modUnionTable, we just need
5695     // to rescan the marked objects on the dirty cards in the modUnionTable.
5696     // The initial marking may have been done during an asynchronous
5697     // collection so there may be dirty bits in the mod-union table.
5698     const int alignment =
5699       CardTableModRefBS::card_size * BitsPerWord;
5700     {
5701       // ... First handle dirty cards in CMS gen
5702       markFromDirtyCardsClosure.set_space(_cmsGen->cmsSpace());
5703       MemRegion ur = _cmsGen->used_region();
5704       HeapWord* lb = ur.start();
5705       HeapWord* ub = (HeapWord*)round_to((intptr_t)ur.end(), alignment);
5706       MemRegion cms_span(lb, ub);
5707       _modUnionTable.dirty_range_iterate_clear(cms_span,
5708                                                &markFromDirtyCardsClosure);
5709       verify_work_stacks_empty();
5710       if (PrintCMSStatistics != 0) {
5711         gclog_or_tty->print(" (re-scanned "SIZE_FORMAT" dirty cards in cms gen) ",
5712           markFromDirtyCardsClosure.num_dirty_cards());
5713       }
5714     }
5715     {
5716       // .. and then repeat for dirty cards in perm gen
5717       markFromDirtyCardsClosure.set_space(_permGen->cmsSpace());
5718       MemRegion ur = _permGen->used_region();
5719       HeapWord* lb = ur.start();
5720       HeapWord* ub = (HeapWord*)round_to((intptr_t)ur.end(), alignment);
5721       MemRegion perm_span(lb, ub);
5722       _modUnionTable.dirty_range_iterate_clear(perm_span,
5723                                                &markFromDirtyCardsClosure);
5724       verify_work_stacks_empty();
5725       if (PrintCMSStatistics != 0) {
5726         gclog_or_tty->print(" (re-scanned "SIZE_FORMAT" dirty cards in perm gen) ",
5727           markFromDirtyCardsClosure.num_dirty_cards());
5728       }
5729     }
5730   }
5731   if (VerifyDuringGC &&
5732       GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
5733     HandleMark hm;  // Discard invalid handles created during verification
5734     Universe::verify();
5735   }
5736   {
5737     GCTraceTime t("root rescan", PrintGCDetails, false, _gc_timer_cm);
5738 
5739     verify_work_stacks_empty();
5740 
5741     gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
5742     GenCollectedHeap::StrongRootsScope srs(gch);
5743     gch->gen_process_strong_roots(_cmsGen->level(),
5744                                   true,  // younger gens as roots
5745                                   false, // use the local StrongRootsScope
5746                                   true,  // collecting perm gen
5747                                   SharedHeap::ScanningOption(roots_scanning_options()),
5748                                   &mrias_cl,
5749                                   true,   // walk code active on stacks
5750                                   NULL);
5751     assert(should_unload_classes()
5752            || (roots_scanning_options() & SharedHeap::SO_CodeCache),
5753            "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
5754   }
5755   verify_work_stacks_empty();
5756   // Restore evacuated mark words, if any, used for overflow list links
5757   if (!CMSOverflowEarlyRestoration) {
5758     restore_preserved_marks_if_any();
5759   }
5760   verify_overflow_empty();
5761 }
5762 
5763 ////////////////////////////////////////////////////////
5764 // Parallel Reference Processing Task Proxy Class
5765 ////////////////////////////////////////////////////////
5766 class CMSRefProcTaskProxy: public AbstractGangTaskWOopQueues {
5767   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
5768   CMSCollector*          _collector;
5769   CMSBitMap*             _mark_bit_map;
5770   const MemRegion        _span;
5771   ProcessTask&           _task;
5772 
5773 public:
5774   CMSRefProcTaskProxy(ProcessTask&     task,
5775                       CMSCollector*    collector,
5776                       const MemRegion& span,
5777                       CMSBitMap*       mark_bit_map,
5778                       AbstractWorkGang* workers,
5779                       OopTaskQueueSet* task_queues):
5780     // XXX Should superclass AGTWOQ also know about AWG since it knows
5781     // about the task_queues used by the AWG? Then it could initialize
5782     // the terminator() object. See 6984287. The set_for_termination()
5783     // below is a temporary band-aid for the regression in 6984287.
5784     AbstractGangTaskWOopQueues("Process referents by policy in parallel",
5785       task_queues),
5786     _task(task),
5787     _collector(collector), _span(span), _mark_bit_map(mark_bit_map)
5788   {
5789     assert(_collector->_span.equals(_span) && !_span.is_empty(),
5790            "Inconsistency in _span");
5791     set_for_termination(workers->active_workers());
5792   }
5793 
5794   OopTaskQueueSet* task_queues() { return queues(); }
5795 
5796   OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
5797 
5798   void do_work_steal(int i,
5799                      CMSParDrainMarkingStackClosure* drain,
5800                      CMSParKeepAliveClosure* keep_alive,
5801                      int* seed);
5802 
5803   virtual void work(uint worker_id);
5804 };
5805 
5806 void CMSRefProcTaskProxy::work(uint worker_id) {
5807   assert(_collector->_span.equals(_span), "Inconsistency in _span");
5808   CMSParKeepAliveClosure par_keep_alive(_collector, _span,
5809                                         _mark_bit_map,
5810                                         &_collector->_revisitStack,
5811                                         work_queue(worker_id));
5812   CMSParDrainMarkingStackClosure par_drain_stack(_collector, _span,
5813                                                  _mark_bit_map,
5814                                                  &_collector->_revisitStack,
5815                                                  work_queue(worker_id));
5816   CMSIsAliveClosure is_alive_closure(_span, _mark_bit_map);
5817   _task.work(worker_id, is_alive_closure, par_keep_alive, par_drain_stack);
5818   if (_task.marks_oops_alive()) {
5819     do_work_steal(worker_id, &par_drain_stack, &par_keep_alive,
5820                   _collector->hash_seed(worker_id));
5821   }
5822   assert(work_queue(worker_id)->size() == 0, "work_queue should be empty");
5823   assert(_collector->_overflow_list == NULL, "non-empty _overflow_list");
5824 }
5825 
5826 class CMSRefEnqueueTaskProxy: public AbstractGangTask {
5827   typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
5828   EnqueueTask& _task;
5829 
5830 public:
5831   CMSRefEnqueueTaskProxy(EnqueueTask& task)
5832     : AbstractGangTask("Enqueue reference objects in parallel"),
5833       _task(task)
5834   { }
5835 
5836   virtual void work(uint worker_id)
5837   {
5838     _task.work(worker_id);
5839   }
5840 };
5841 
5842 CMSParKeepAliveClosure::CMSParKeepAliveClosure(CMSCollector* collector,
5843   MemRegion span, CMSBitMap* bit_map, CMSMarkStack* revisit_stack,
5844   OopTaskQueue* work_queue):
5845    Par_KlassRememberingOopClosure(collector, NULL, revisit_stack),
5846    _span(span),
5847    _bit_map(bit_map),
5848    _work_queue(work_queue),
5849    _mark_and_push(collector, span, bit_map, revisit_stack, work_queue),
5850    _low_water_mark(MIN2((uint)(work_queue->max_elems()/4),
5851                         (uint)(CMSWorkQueueDrainThreshold * ParallelGCThreads)))
5852 { }
5853 
5854 // . see if we can share work_queues with ParNew? XXX
5855 void CMSRefProcTaskProxy::do_work_steal(int i,
5856   CMSParDrainMarkingStackClosure* drain,
5857   CMSParKeepAliveClosure* keep_alive,
5858   int* seed) {
5859   OopTaskQueue* work_q = work_queue(i);
5860   NOT_PRODUCT(int num_steals = 0;)
5861   oop obj_to_scan;
5862 
5863   while (true) {
5864     // Completely finish any left over work from (an) earlier round(s)
5865     drain->trim_queue(0);
5866     size_t num_from_overflow_list = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
5867                                          (size_t)ParGCDesiredObjsFromOverflowList);
5868     // Now check if there's any work in the overflow list
5869     // Passing ParallelGCThreads as the third parameter, no_of_gc_threads,
5870     // only affects the number of attempts made to get work from the
5871     // overflow list and does not affect the number of workers.  Just
5872     // pass ParallelGCThreads so this behavior is unchanged.
5873     if (_collector->par_take_from_overflow_list(num_from_overflow_list,
5874                                                 work_q,
5875                                                 ParallelGCThreads)) {
5876       // Found something in global overflow list;
5877       // not yet ready to go stealing work from others.
5878       // We'd like to assert(work_q->size() != 0, ...)
5879       // because we just took work from the overflow list,
5880       // but of course we can't, since all of that might have
5881       // been already stolen from us.
5882       continue;
5883     }
5884     // Verify that we have no work before we resort to stealing
5885     assert(work_q->size() == 0, "Have work, shouldn't steal");
5886     // Try to steal from other queues that have work
5887     if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
5888       NOT_PRODUCT(num_steals++;)
5889       assert(obj_to_scan->is_oop(), "Oops, not an oop!");
5890       assert(_mark_bit_map->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?");
5891       // Do scanning work
5892       obj_to_scan->oop_iterate(keep_alive);
5893       // Loop around, finish this work, and try to steal some more
5894     } else if (terminator()->offer_termination()) {
5895       break;  // nirvana from the infinite cycle
5896     }
5897   }
5898   NOT_PRODUCT(
5899     if (PrintCMSStatistics != 0) {
5900       gclog_or_tty->print("\n\t(%d: stole %d oops)", i, num_steals);
5901     }
5902   )
5903 }
5904 
5905 void CMSRefProcTaskExecutor::execute(ProcessTask& task)
5906 {
5907   GenCollectedHeap* gch = GenCollectedHeap::heap();
5908   FlexibleWorkGang* workers = gch->workers();
5909   assert(workers != NULL, "Need parallel worker threads.");
5910   CMSRefProcTaskProxy rp_task(task, &_collector,
5911                               _collector.ref_processor()->span(),
5912                               _collector.markBitMap(),
5913                               workers, _collector.task_queues());
5914   workers->run_task(&rp_task);
5915 }
5916 
5917 void CMSRefProcTaskExecutor::execute(EnqueueTask& task)
5918 {
5919 
5920   GenCollectedHeap* gch = GenCollectedHeap::heap();
5921   FlexibleWorkGang* workers = gch->workers();
5922   assert(workers != NULL, "Need parallel worker threads.");
5923   CMSRefEnqueueTaskProxy enq_task(task);
5924   workers->run_task(&enq_task);
5925 }
5926 
5927 void CMSCollector::refProcessingWork(bool asynch, bool clear_all_soft_refs) {
5928 
5929   ResourceMark rm;
5930   HandleMark   hm;
5931 
5932   ReferenceProcessor* rp = ref_processor();
5933   assert(rp->span().equals(_span), "Spans should be equal");
5934   assert(!rp->enqueuing_is_done(), "Enqueuing should not be complete");
5935   // Process weak references.
5936   rp->setup_policy(clear_all_soft_refs);
5937   verify_work_stacks_empty();
5938 
5939   CMSKeepAliveClosure cmsKeepAliveClosure(this, _span, &_markBitMap,
5940                                           &_markStack, &_revisitStack,
5941                                           false /* !preclean */);
5942   CMSDrainMarkingStackClosure cmsDrainMarkingStackClosure(this,
5943                                 _span, &_markBitMap, &_markStack,
5944                                 &cmsKeepAliveClosure, false /* !preclean */);
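       // Roughly: cmsKeepAliveClosure marks, and pushes for later tracing,
       // each referent that reference processing decides must remain live;
       // cmsDrainMarkingStackClosure then completes the transitive closure
       // over everything so pushed.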
5945   {
5946     GCTraceTime t("weak refs processing", PrintGCDetails, false, _gc_timer_cm);
5947 
5948     ReferenceProcessorStats stats;
5949     if (rp->processing_is_mt()) {
5950       // Set the degree of MT here.  If the discovery is done MT, there
5951       // may have been a different number of threads doing the discovery
5952       // and a different number of discovered lists may have Ref objects.
5953       // That is OK as long as the Reference lists are balanced (see
5954       // balance_all_queues() and balance_queues()).
5955       GenCollectedHeap* gch = GenCollectedHeap::heap();
5956       int active_workers = ParallelGCThreads;
5957       FlexibleWorkGang* workers = gch->workers();
5958       if (workers != NULL) {
5959         active_workers = workers->active_workers();
5960         // The expectation is that active_workers will have already
5961         // been set to a reasonable value.  If it has not been set,
5962         // investigate.
5963         assert(active_workers > 0, "Should have been set during scavenge");
5964       }
5965       rp->set_active_mt_degree(active_workers);
5966       CMSRefProcTaskExecutor task_executor(*this);
5967       stats = rp->process_discovered_references(&_is_alive_closure,
5968                                         &cmsKeepAliveClosure,
5969                                         &cmsDrainMarkingStackClosure,
5970                                         &task_executor,
5971                                         _gc_timer_cm);
5972     } else {
5973       stats = rp->process_discovered_references(&_is_alive_closure,
5974                                         &cmsKeepAliveClosure,
5975                                         &cmsDrainMarkingStackClosure,
5976                                         NULL,
5977                                         _gc_timer_cm);
5978     }
5979     _gc_tracer_cm->report_gc_reference_stats(stats);
5980 
5981     verify_work_stacks_empty();
5982   }
5983 
5984   if (should_unload_classes()) {
5985     {
5986       GCTraceTime t("class unloading", PrintGCDetails, false, _gc_timer_cm);
5987 
5988       // Follow SystemDictionary roots and unload classes
5989       bool purged_class = SystemDictionary::do_unloading(&_is_alive_closure);
5990 
5991       // Follow CodeCache roots and unload any methods marked for unloading
5992       CodeCache::do_unloading(&_is_alive_closure,
5993                               &cmsKeepAliveClosure,
5994                               purged_class);
5995 
5996       cmsDrainMarkingStackClosure.do_void();
5997       verify_work_stacks_empty();
5998 
5999       // Update subklass/sibling/implementor links in KlassKlass descendants
6000       assert(!_revisitStack.isEmpty(), "revisit stack should not be empty");
6001       oop k;
6002       while ((k = _revisitStack.pop()) != NULL) {
6003         ((Klass*)(oopDesc*)k)->follow_weak_klass_links(
6004                        &_is_alive_closure,
6005                        &cmsKeepAliveClosure);
6006       }
6007       assert(!ClassUnloading ||
6008              (_markStack.isEmpty() && overflow_list_is_empty()),
6009              "Should not have found new reachable objects");
6010       assert(_revisitStack.isEmpty(), "revisit stack should have been drained");
6011       cmsDrainMarkingStackClosure.do_void();
6012       verify_work_stacks_empty();
6013     }
6014 
6015     {
6016       GCTraceTime t("scrub symbol table", PrintGCDetails, false, _gc_timer_cm);
6017       // Clean up unreferenced symbols in symbol table.
6018       SymbolTable::unlink();
6019     }
6020   }
6021 
6022   if (should_unload_classes() || !JavaObjectsInPerm) {
6023     GCTraceTime t("scrub string table", PrintGCDetails, false, _gc_timer_cm);
6024     // Now clean up stale oops in StringTable
6025     StringTable::unlink(&_is_alive_closure);
6026   }
6027 
6028   verify_work_stacks_empty();
6029   // Restore any preserved marks as a result of mark stack or
6030   // work queue overflow
6031   restore_preserved_marks_if_any();  // done single-threaded for now
6032 
6033   rp->set_enqueuing_is_done(true);
6034   if (rp->processing_is_mt()) {
6035     rp->balance_all_queues();
6036     CMSRefProcTaskExecutor task_executor(*this);
6037     rp->enqueue_discovered_references(&task_executor);
6038   } else {
6039     rp->enqueue_discovered_references(NULL);
6040   }
6041   rp->verify_no_references_recorded();
6042   assert(!rp->discovery_enabled(), "should have been disabled");
6043 }
6044 
6045 #ifndef PRODUCT
6046 void CMSCollector::check_correct_thread_executing() {
6047   Thread* t = Thread::current();
6048   // Only the VM thread or the CMS thread should be here.
6049   assert(t->is_ConcurrentGC_thread() || t->is_VM_thread(),
6050          "Unexpected thread type");
6051   // If this is the vm thread, the foreground process
6052   // should not be waiting.  Note that _foregroundGCIsActive is
6053   // true while the foreground collector is waiting.
6054   if (_foregroundGCShouldWait) {
6055     // We cannot be the VM thread
6056     assert(t->is_ConcurrentGC_thread(),
6057            "Should be CMS thread");
6058   } else {
6059     // We can be the CMS thread only if we are in a stop-world
6060     // phase of CMS collection.
6061     if (t->is_ConcurrentGC_thread()) {
6062       assert(_collectorState == InitialMarking ||
6063              _collectorState == FinalMarking,
6064              "Should be a stop-world phase");
6065       // The CMS thread should be holding the CMS_token.
6066       assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6067              "Potential interference with concurrently "
6068              "executing VM thread");
6069     }
6070   }
6071 }
6072 #endif
6073 
6074 void CMSCollector::sweep(bool asynch) {
6075   assert(_collectorState == Sweeping, "just checking");
6076   check_correct_thread_executing();
6077   verify_work_stacks_empty();
6078   verify_overflow_empty();
6079   increment_sweep_count();
6080   TraceCMSMemoryManagerStats tms(_collectorState, GenCollectedHeap::heap()->gc_cause());
6081 
6082   _inter_sweep_timer.stop();
6083   _inter_sweep_estimate.sample(_inter_sweep_timer.seconds());
6084   size_policy()->avg_cms_free_at_sweep()->sample(_cmsGen->free());
6085 
6086   // PermGen verification support: If perm gen sweeping is disabled in
6087   // this cycle, we preserve the perm gen object "deadness" information
6088   // in the perm_gen_verify_bit_map. In order to do that we traverse
6089   // all blocks in perm gen and mark all dead objects.
6090   if (verifying() && !should_unload_classes()) {
6091     assert(perm_gen_verify_bit_map()->sizeInBits() != 0,
6092            "Should have already been allocated");
6093     MarkDeadObjectsClosure mdo(this, _permGen->cmsSpace(),
6094                                markBitMap(), perm_gen_verify_bit_map());
6095     if (asynch) {
6096       CMSTokenSyncWithLocks ts(true, _permGen->freelistLock(),
6097                                bitMapLock());
6098       _permGen->cmsSpace()->blk_iterate(&mdo);
6099     } else {
6100       // In the case of synchronous sweep, we already have
6101       // the requisite locks/tokens.
6102       _permGen->cmsSpace()->blk_iterate(&mdo);
6103     }
6104   }
6105 
6106   assert(!_intra_sweep_timer.is_active(), "Should not be active");
6107   _intra_sweep_timer.reset();
6108   _intra_sweep_timer.start();
6109   if (asynch) {
6110     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
6111     CMSPhaseAccounting pa(this, "sweep", !PrintGCDetails);
6112     // First sweep the old gen then the perm gen
6113     {
6114       CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(),
6115                                bitMapLock());
6116       sweepWork(_cmsGen, asynch);
6117     }
6118 
6119     // Now repeat for perm gen
6120     if (should_unload_classes()) {
6121       CMSTokenSyncWithLocks ts(true, _permGen->freelistLock(),
6122                              bitMapLock());
6123       sweepWork(_permGen, asynch);
6124     }
6125 
6126     // Update Universe::_heap_*_at_gc figures.
6127     // We need all the free list locks to make the abstract state
6128     // transition from Sweeping to Resetting. See detailed note
6129     // further below.
6130     {
6131       CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(),
6132                                _permGen->freelistLock());
6133       // Update heap occupancy information which is used as
6134       // input to soft ref clearing policy at the next gc.
6135       Universe::update_heap_info_at_gc();
6136       _collectorState = Resizing;
6137     }
6138   } else {
6139     // already have needed locks
6140     sweepWork(_cmsGen, asynch);
6141 
6142     if (should_unload_classes()) {
6143       sweepWork(_permGen, asynch);
6144     }
6145     // Update heap occupancy information which is used as
6146     // input to soft ref clearing policy at the next gc.
6147     Universe::update_heap_info_at_gc();
6148     _collectorState = Resizing;
6149   }
6150   verify_work_stacks_empty();
6151   verify_overflow_empty();
6152 
6153   _intra_sweep_timer.stop();
6154   _intra_sweep_estimate.sample(_intra_sweep_timer.seconds());
6155 
6156   _inter_sweep_timer.reset();
6157   _inter_sweep_timer.start();
6158 
6159   // We need to use a monotonically non-decreasing time in ms;
6160   // os::javaTimeMillis() does not guarantee monotonicity and can
6161   // trigger time-warp warnings, so we derive the time from javaTimeNanos().
6162   jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
6163   update_time_of_last_gc(now);
6164 
6165   // NOTE on abstract state transitions:
6166   // Mutators allocate-live and/or mark the mod-union table dirty
6167   // based on the state of the collection.  The former is done in
6168   // the interval [Marking, Sweeping] and the latter in the interval
6169   // [Marking, Sweeping).  Thus the transitions into the Marking state
6170   // and out of the Sweeping state must be synchronously visible
6171   // globally to the mutators.
6172   // The transition into the Marking state happens with the world
6173   // stopped so the mutators will globally see it.  Sweeping is
6174   // done asynchronously by the background collector so the transition
6175   // from the Sweeping state to the Resizing state must be done
6176   // under the freelistLock (as is the check for whether to
6177   // allocate-live and whether to dirty the mod-union table).
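       // A sketch of the normal concurrent cycle (see the CollectorState
       // enum for the authoritative list of states):
       //   Idling -> InitialMarking -> Marking -> Precleaning ->
       //   AbortablePreclean -> FinalMarking -> Sweeping -> Resizing ->
       //   Resetting -> Idling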
6178   assert(_collectorState == Resizing, "Change of collector state to"
6179     " Resizing must be done under the freelistLocks (plural)");
6180 
6181   // Now that sweeping has been completed, we clear
6182   // the incremental_collection_failed flag,
6183   // thus inviting a younger gen collection to promote into
6184   // this generation. If such a promotion may still fail,
6185   // the flag will be set again when a young collection is
6186   // attempted.
6187   GenCollectedHeap* gch = GenCollectedHeap::heap();
6188   gch->clear_incremental_collection_failed();  // Worth retrying as fresh space may have been freed up
6189   gch->update_full_collections_completed(_collection_count_start);
6190 }
6191 
6192 // FIX ME!!! Looks like this belongs in CFLSpace, with
6193 // CMSGen merely delegating to it.
6194 void ConcurrentMarkSweepGeneration::setNearLargestChunk() {
6195   double nearLargestPercent = FLSLargestBlockCoalesceProximity;
6196   HeapWord*  minAddr        = _cmsSpace->bottom();
6197   HeapWord*  largestAddr    =
6198     (HeapWord*) _cmsSpace->dictionary()->find_largest_dict();
6199   if (largestAddr == NULL) {
6200     // The dictionary appears to be empty.  In this case
6201     // try to coalesce at the end of the heap.
6202     largestAddr = _cmsSpace->end();
6203   }
6204   size_t largestOffset     = pointer_delta(largestAddr, minAddr);
6205   size_t nearLargestOffset =
6206     (size_t)((double)largestOffset * nearLargestPercent) - MinChunkSize;
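       // Illustrative arithmetic (values hypothetical): with
       // nearLargestPercent = 0.99 and the largest block found 1M words
       // past minAddr, the threshold lands at roughly
       // minAddr + 0.99M - MinChunkSize, i.e. just in front of the
       // largest block (see isNearLargestChunk() below).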
6207   if (PrintFLSStatistics != 0) {
6208     gclog_or_tty->print_cr(
6209       "CMS: Large Block: " PTR_FORMAT ";"
6210       " Proximity: " PTR_FORMAT " -> " PTR_FORMAT,
6211       largestAddr,
6212       _cmsSpace->nearLargestChunk(), minAddr + nearLargestOffset);
6213   }
6214   _cmsSpace->set_nearLargestChunk(minAddr + nearLargestOffset);
6215 }
6216 
6217 bool ConcurrentMarkSweepGeneration::isNearLargestChunk(HeapWord* addr) {
6218   return addr >= _cmsSpace->nearLargestChunk();
6219 }
6220 
6221 FreeChunk* ConcurrentMarkSweepGeneration::find_chunk_at_end() {
6222   return _cmsSpace->find_chunk_at_end();
6223 }
6224 
6225 void ConcurrentMarkSweepGeneration::update_gc_stats(int current_level,
6226                                                     bool full) {
6227   // The next lower level has been collected.  Gather any statistics
6228   // that are of interest at this point.
6229   if (!full && (current_level + 1) == level()) {
6230     // Gather statistics on the young generation collection.
6231     collector()->stats().record_gc0_end(used());
6232   }
6233 }
6234 
6235 CMSAdaptiveSizePolicy* ConcurrentMarkSweepGeneration::size_policy() {
6236   GenCollectedHeap* gch = GenCollectedHeap::heap();
6237   assert(gch->kind() == CollectedHeap::GenCollectedHeap,
6238     "Wrong type of heap");
6239   CMSAdaptiveSizePolicy* sp = (CMSAdaptiveSizePolicy*)
6240     gch->gen_policy()->size_policy();
6241   assert(sp->is_gc_cms_adaptive_size_policy(),
6242     "Wrong type of size policy");
6243   return sp;
6244 }
6245 
6246 void ConcurrentMarkSweepGeneration::rotate_debug_collection_type() {
6247   if (PrintGCDetails && Verbose) {
6248     gclog_or_tty->print("Rotate from %d ", _debug_collection_type);
6249   }
6250   _debug_collection_type = (CollectionTypes) (_debug_collection_type + 1);
6251   _debug_collection_type =
6252     (CollectionTypes) (_debug_collection_type % Unknown_collection_type);
6253   if (PrintGCDetails && Verbose) {
6254     gclog_or_tty->print_cr("to %d ", _debug_collection_type);
6255   }
6256 }
6257 
6258 void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* gen,
6259   bool asynch) {
6260   // We iterate over the space(s) underlying this generation,
6261   // checking the mark bit map to see if the bits corresponding
6262   // to specific blocks are marked or not. Blocks that are
6263   // marked are live and are not swept up. All remaining blocks
6264   // are swept up, with coalescing on-the-fly as we sweep up
6265   // contiguous free and/or garbage blocks.
6266   // We need to ensure that the sweeper synchronizes with allocators
6267   // and stop-the-world collectors. In particular, the following
6268   // locks are used:
6269   // . CMS token: if this is held, a stop-the-world collection cannot occur
6270   // . freelistLock: if this is held, no allocation can occur from this
6271   //                 generation by another thread
6272   // . bitMapLock: if this is held, no other thread can access or update
6273   //               the marking bit map
6274 
6275   // Note that we need to hold the freelistLock if we use
6276   // block iterate below; else the iterator might go awry if
6277   // a mutator (or promotion) causes block contents to change
6278   // (for instance if the allocator divvies up a block).
6279   // If we hold the free list lock, for all practical purposes
6280   // young generation GC's can't occur (they'll usually need to
6281   // promote), so we might as well prevent all young generation
6282   // GC's while we do a sweeping step. For the same reason, we might
6283   // as well take the bit map lock for the entire duration
6284 
6285   // check that we hold the requisite locks
6286   assert(have_cms_token(), "Should hold cms token");
6287   assert(   (asynch && ConcurrentMarkSweepThread::cms_thread_has_cms_token())
6288          || (!asynch && ConcurrentMarkSweepThread::vm_thread_has_cms_token()),
6289         "Should possess CMS token to sweep");
6290   assert_lock_strong(gen->freelistLock());
6291   assert_lock_strong(bitMapLock());
6292 
6293   assert(!_inter_sweep_timer.is_active(), "Was switched off in an outer context");
6294   assert(_intra_sweep_timer.is_active(),  "Was switched on  in an outer context");
6295   gen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()),
6296                                       _inter_sweep_estimate.padded_average(),
6297                                       _intra_sweep_estimate.padded_average());
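  // Remember the approximate location of the current largest free chunk
  // as a hint for the sweep below (see setNearLargestChunk() for the
  // precise computation).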
6298   gen->setNearLargestChunk();
6299 
6300   {
    SweepClosure sweepClosure(this, gen, &_markBitMap,
                              CMSYield && asynch);
6303     gen->cmsSpace()->blk_iterate_careful(&sweepClosure);
6304     // We need to free-up/coalesce garbage/blocks from a
6305     // co-terminal free run. This is done in the SweepClosure
6306     // destructor; so, do not remove this scope, else the
6307     // end-of-sweep-census below will be off by a little bit.
6308   }
6309   gen->cmsSpace()->sweep_completed();
6310   gen->cmsSpace()->endSweepFLCensus(sweep_count());
6311   if (should_unload_classes()) {                // unloaded classes this cycle,
6312     _concurrent_cycles_since_last_unload = 0;   // ... reset count
6313   } else {                                      // did not unload classes,
6314     _concurrent_cycles_since_last_unload++;     // ... increment count
6315   }
6316 }
6317 
6318 // Reset CMS data structures (for now just the marking bit map)
6319 // preparatory for the next cycle.
6320 void CMSCollector::reset(bool asynch) {
6321   GenCollectedHeap* gch = GenCollectedHeap::heap();
6322   CMSAdaptiveSizePolicy* sp = size_policy();
6323   AdaptiveSizePolicyOutput(sp, gch->total_collections());
6324   if (asynch) {
6325     CMSTokenSyncWithLocks ts(true, bitMapLock());
6326 
    // If the state is not "Resetting", the foreground thread
    // has already done the collection and the reset.
6329     if (_collectorState != Resetting) {
6330       assert(_collectorState == Idling, "The state should only change"
6331         " because the foreground collector has finished the collection");
6332       return;
6333     }
6334 
6335     // Clear the mark bitmap (no grey objects to start with)
6336     // for the next cycle.
6337     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
6338     CMSPhaseAccounting cmspa(this, "reset", !PrintGCDetails);
6339 
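    // Clear the bit map in chunks of (at most) CMSBitMapYieldQuantum
    // words, yielding to the foreground collector between chunks.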
6340     HeapWord* curAddr = _markBitMap.startWord();
6341     while (curAddr < _markBitMap.endWord()) {
6342       size_t remaining  = pointer_delta(_markBitMap.endWord(), curAddr);
6343       MemRegion chunk(curAddr, MIN2(CMSBitMapYieldQuantum, remaining));
6344       _markBitMap.clear_large_range(chunk);
6345       if (ConcurrentMarkSweepThread::should_yield() &&
6346           !foregroundGCIsActive() &&
6347           CMSYield) {
6348         assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6349                "CMS thread should hold CMS token");
6350         assert_lock_strong(bitMapLock());
6351         bitMapLock()->unlock();
6352         ConcurrentMarkSweepThread::desynchronize(true);
6353         ConcurrentMarkSweepThread::acknowledge_yield_request();
6354         stopTimer();
6355         if (PrintCMSStatistics != 0) {
6356           incrementYields();
6357         }
6358         icms_wait();
6359 
6360         // See the comment in coordinator_yield()
6361         for (unsigned i = 0; i < CMSYieldSleepCount &&
6362                          ConcurrentMarkSweepThread::should_yield() &&
6363                          !CMSCollector::foregroundGCIsActive(); ++i) {
6364           os::sleep(Thread::current(), 1, false);
6365           ConcurrentMarkSweepThread::acknowledge_yield_request();
6366         }
6367 
6368         ConcurrentMarkSweepThread::synchronize(true);
6369         bitMapLock()->lock_without_safepoint_check();
6370         startTimer();
6371       }
6372       curAddr = chunk.end();
6373     }
6374     // A successful mostly concurrent collection has been done.
6375     // Because only the full (i.e., concurrent mode failure) collections
6376     // are being measured for gc overhead limits, clean the "near" flag
6377     // and count.
6378     sp->reset_gc_overhead_limit_count();
6379     _collectorState = Idling;
6380   } else {
6381     // already have the lock
6382     assert(_collectorState == Resetting, "just checking");
6383     assert_lock_strong(bitMapLock());
6384     _markBitMap.clear_all();
6385     _collectorState = Idling;
6386   }
6387 
6388   // Stop incremental mode after a cycle completes, so that any future cycles
6389   // are triggered by allocation.
6390   stop_icms();
6391 
6392   NOT_PRODUCT(
6393     if (RotateCMSCollectionTypes) {
6394       _cmsGen->rotate_debug_collection_type();
6395     }
6396   )
6397 
6398   register_gc_end();
6399 }
6400 
6401 void CMSCollector::do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause) {
6402   gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
6403   TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
6404   GCTraceTime t(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL);
6405   TraceCollectorStats tcs(counters());
6406 
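  // The two op types below correspond to CMS's two stop-the-world
  // pauses: the initial mark and the final remark.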
6407   switch (op) {
6408     case CMS_op_checkpointRootsInitial: {
6409       SvcGCMarker sgcm(SvcGCMarker::OTHER);
6410       checkpointRootsInitial(true);       // asynch
6411       if (PrintGC) {
6412         _cmsGen->printOccupancy("initial-mark");
6413       }
6414       break;
6415     }
6416     case CMS_op_checkpointRootsFinal: {
6417       SvcGCMarker sgcm(SvcGCMarker::OTHER);
6418       checkpointRootsFinal(true,    // asynch
6419                            false,   // !clear_all_soft_refs
6420                            false);  // !init_mark_was_synchronous
6421       if (PrintGC) {
6422         _cmsGen->printOccupancy("remark");
6423       }
6424       break;
6425     }
6426     default:
6427       fatal("No such CMS_op");
6428   }
6429 }
6430 
6431 #ifndef PRODUCT
6432 size_t const CMSCollector::skip_header_HeapWords() {
6433   return FreeChunk::header_size();
6434 }
6435 
// Try to collect here the conditions that should hold when the
// CMS thread is exiting. The idea is that the foreground GC
6438 // thread should not be blocked if it wants to terminate
6439 // the CMS thread and yet continue to run the VM for a while
6440 // after that.
6441 void CMSCollector::verify_ok_to_terminate() const {
6442   assert(Thread::current()->is_ConcurrentGC_thread(),
6443          "should be called by CMS thread");
6444   assert(!_foregroundGCShouldWait, "should be false");
6445   // We could check here that all the various low-level locks
6446   // are not held by the CMS thread, but that is overkill; see
6447   // also CMSThread::verify_ok_to_terminate() where the CGC_lock
6448   // is checked.
6449 }
6450 #endif
6451 
6452 size_t CMSCollector::block_size_using_printezis_bits(HeapWord* addr) const {
  assert(_markBitMap.isMarked(addr) && _markBitMap.isMarked(addr + 1),
         "missing Printezis mark?");
6455   HeapWord* nextOneAddr = _markBitMap.getNextMarkedWordAddress(addr + 2);
6456   size_t size = pointer_delta(nextOneAddr + 1, addr);
6457   assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
6458          "alignment problem");
6459   assert(size >= 3, "Necessary for Printezis marks to work");
6460   return size;
6461 }
6462 
6463 // A variant of the above (block_size_using_printezis_bits()) except
6464 // that we return 0 if the P-bits are not yet set.
6465 size_t CMSCollector::block_size_if_printezis_bits(HeapWord* addr) const {
6466   if (_markBitMap.isMarked(addr + 1)) {
6467     assert(_markBitMap.isMarked(addr), "P-bit can be set only for marked objects");
6468     HeapWord* nextOneAddr = _markBitMap.getNextMarkedWordAddress(addr + 2);
6469     size_t size = pointer_delta(nextOneAddr + 1, addr);
6470     assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
6471            "alignment problem");
6472     assert(size >= 3, "Necessary for Printezis marks to work");
6473     return size;
6474   }
6475   return 0;
6476 }
6477 
6478 HeapWord* CMSCollector::next_card_start_after_block(HeapWord* addr) const {
6479   size_t sz = 0;
6480   oop p = (oop)addr;
6481   if (p->klass_or_null() != NULL && p->is_parsable()) {
6482     sz = CompactibleFreeListSpace::adjustObjectSize(p->size());
6483   } else {
6484     sz = block_size_using_printezis_bits(addr);
6485   }
6486   assert(sz > 0, "size must be nonzero");
6487   HeapWord* next_block = addr + sz;
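  // Round the first word past the block up to a card boundary; the
  // assert below checks that this lands on a strictly later card.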
6488   HeapWord* next_card  = (HeapWord*)round_to((uintptr_t)next_block,
6489                                              CardTableModRefBS::card_size);
6490   assert(round_down((uintptr_t)addr,      CardTableModRefBS::card_size) <
6491          round_down((uintptr_t)next_card, CardTableModRefBS::card_size),
6492          "must be different cards");
6493   return next_card;
6494 }
6495 
6496 
6497 // CMS Bit Map Wrapper /////////////////////////////////////////
6498 
// Construct a CMS bit map infrastructure, but don't create the
// bit vector itself. That is done by a separate call to
// CMSBitMap::allocate() further below.
6502 CMSBitMap::CMSBitMap(int shifter, int mutex_rank, const char* mutex_name):
6503   _bm(),
6504   _shifter(shifter),
6505   _lock(mutex_rank >= 0 ? new Mutex(mutex_rank, mutex_name, true) : NULL)
6506 {
6507   _bmStartWord = 0;
6508   _bmWordSize  = 0;
6509 }
6510 
6511 bool CMSBitMap::allocate(MemRegion mr) {
6512   _bmStartWord = mr.start();
6513   _bmWordSize  = mr.word_size();
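  // One bit covers (1 << _shifter) HeapWords, so we need
  // (_bmWordSize >> _shifter) bits, i.e.
  // (_bmWordSize >> (_shifter + LogBitsPerByte)) bytes; the +1 byte
  // rounds up any remainder bits.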
6514   ReservedSpace brs(ReservedSpace::allocation_align_size_up(
6515                      (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1));
6516   if (!brs.is_reserved()) {
6517     warning("CMS bit map allocation failure");
6518     return false;
6519   }
  // For now we'll just commit all of the bit map up front.
6521   // Later on we'll try to be more parsimonious with swap.
6522   if (!_virtual_space.initialize(brs, brs.size())) {
6523     warning("CMS bit map backing store failure");
6524     return false;
6525   }
6526   assert(_virtual_space.committed_size() == brs.size(),
6527          "didn't reserve backing store for all of CMS bit map?");
6528   _bm.set_map((BitMap::bm_word_t*)_virtual_space.low());
6529   assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >=
6530          _bmWordSize, "inconsistency in bit map sizing");
6531   _bm.set_size(_bmWordSize >> _shifter);
6532 
6533   // bm.clear(); // can we rely on getting zero'd memory? verify below
6534   assert(isAllClear(),
6535          "Expected zero'd memory from ReservedSpace constructor");
6536   assert(_bm.size() == heapWordDiffToOffsetDiff(sizeInWords()),
6537          "consistency check");
6538   return true;
6539 }
6540 
6541 void CMSBitMap::dirty_range_iterate_clear(MemRegion mr, MemRegionClosure* cl) {
6542   HeapWord *next_addr, *end_addr, *last_addr;
6543   assert_locked();
6544   assert(covers(mr), "out-of-range error");
6545   // XXX assert that start and end are appropriately aligned
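  // Walk successive maximal runs of set bits in [mr.start(), mr.end()):
  // each iteration extracts and clears one dirty run and applies the
  // closure to it; an empty run means the rest of the range is clean.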
6546   for (next_addr = mr.start(), end_addr = mr.end();
6547        next_addr < end_addr; next_addr = last_addr) {
6548     MemRegion dirty_region = getAndClearMarkedRegion(next_addr, end_addr);
6549     last_addr = dirty_region.end();
6550     if (!dirty_region.is_empty()) {
6551       cl->do_MemRegion(dirty_region);
6552     } else {
6553       assert(last_addr == end_addr, "program logic");
6554       return;
6555     }
6556   }
6557 }
6558 
6559 #ifndef PRODUCT
6560 void CMSBitMap::assert_locked() const {
6561   CMSLockVerifier::assert_locked(lock());
6562 }
6563 
6564 bool CMSBitMap::covers(MemRegion mr) const {
6565   // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
6566   assert((size_t)_bm.size() == (_bmWordSize >> _shifter),
6567          "size inconsistency");
6568   return (mr.start() >= _bmStartWord) &&
6569          (mr.end()   <= endWord());
6570 }
6571 
6572 bool CMSBitMap::covers(HeapWord* start, size_t size) const {
  return (start >= _bmStartWord && (start + size) <= endWord());
6574 }
6575 
6576 void CMSBitMap::verifyNoOneBitsInRange(HeapWord* left, HeapWord* right) {
6577   // verify that there are no 1 bits in the interval [left, right)
6578   FalseBitMapClosure falseBitMapClosure;
6579   iterate(&falseBitMapClosure, left, right);
6580 }
6581 
6582 void CMSBitMap::region_invariant(MemRegion mr)
6583 {
6584   assert_locked();
6585   // mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
6586   assert(!mr.is_empty(), "unexpected empty region");
6587   assert(covers(mr), "mr should be covered by bit map");
6588   // convert address range into offset range
6589   size_t start_ofs = heapWordToOffset(mr.start());
6590   // Make sure that end() is appropriately aligned
6591   assert(mr.end() == (HeapWord*)round_to((intptr_t)mr.end(),
6592                         (1 << (_shifter+LogHeapWordSize))),
6593          "Misaligned mr.end()");
6594   size_t end_ofs   = heapWordToOffset(mr.end());
6595   assert(end_ofs > start_ofs, "Should mark at least one bit");
6596 }
6597 
6598 #endif
6599 
6600 bool CMSMarkStack::allocate(size_t size) {
6601   // allocate a stack of the requisite depth
6602   ReservedSpace rs(ReservedSpace::allocation_align_size_up(
6603                    size * sizeof(oop)));
6604   if (!rs.is_reserved()) {
6605     warning("CMSMarkStack allocation failure");
6606     return false;
6607   }
6608   if (!_virtual_space.initialize(rs, rs.size())) {
6609     warning("CMSMarkStack backing store failure");
6610     return false;
6611   }
6612   assert(_virtual_space.committed_size() == rs.size(),
6613          "didn't reserve backing store for all of CMS stack?");
6614   _base = (oop*)(_virtual_space.low());
6615   _index = 0;
6616   _capacity = size;
6617   NOT_PRODUCT(_max_depth = 0);
6618   return true;
6619 }
6620 
6621 // XXX FIX ME !!! In the MT case we come in here holding a
6622 // leaf lock. For printing we need to take a further lock
// which has lower rank. We need to recalibrate the two
// lock-ranks involved in order to be able to print the
6625 // messages below. (Or defer the printing to the caller.
6626 // For now we take the expedient path of just disabling the
6627 // messages for the problematic case.)
6628 void CMSMarkStack::expand() {
6629   assert(_capacity <= MarkStackSizeMax, "stack bigger than permitted");
6630   if (_capacity == MarkStackSizeMax) {
6631     if (_hit_limit++ == 0 && !CMSConcurrentMTEnabled && PrintGCDetails) {
6632       // We print a warning message only once per CMS cycle.
6633       gclog_or_tty->print_cr(" (benign) Hit CMSMarkStack max size limit");
6634     }
6635     return;
6636   }
6637   // Double capacity if possible
6638   size_t new_capacity = MIN2(_capacity*2, MarkStackSizeMax);
6639   // Do not give up existing stack until we have managed to
6640   // get the double capacity that we desired.
6641   ReservedSpace rs(ReservedSpace::allocation_align_size_up(
6642                    new_capacity * sizeof(oop)));
6643   if (rs.is_reserved()) {
6644     // Release the backing store associated with old stack
6645     _virtual_space.release();
6646     // Reinitialize virtual space for new stack
6647     if (!_virtual_space.initialize(rs, rs.size())) {
6648       fatal("Not enough swap for expanded marking stack");
6649     }
6650     _base = (oop*)(_virtual_space.low());
6651     _index = 0;
6652     _capacity = new_capacity;
6653   } else if (_failed_double++ == 0 && !CMSConcurrentMTEnabled && PrintGCDetails) {
    // Failed to double the capacity; continue with the existing stack.
    // We print a detail message only once per CMS cycle.
6656     gclog_or_tty->print(" (benign) Failed to expand marking stack from "SIZE_FORMAT"K to "
6657             SIZE_FORMAT"K",
6658             _capacity / K, new_capacity / K);
6659   }
6660 }
6661 
6662 
6663 // Closures
// XXX: there seems to be a lot of code duplication here;
6665 // should refactor and consolidate common code.
6666 
6667 // This closure is used to mark refs into the CMS generation in
6668 // the CMS bit map. Called at the first checkpoint. This closure
6669 // assumes that we do not need to re-mark dirty cards; if the CMS
6670 // generation on which this is used is not an oldest (modulo perm gen)
6671 // generation then this will lose younger_gen cards!
6672 
6673 MarkRefsIntoClosure::MarkRefsIntoClosure(
6674   MemRegion span, CMSBitMap* bitMap):
6675     _span(span),
6676     _bitMap(bitMap)
6677 {
6678     assert(_ref_processor == NULL, "deliberately left NULL");
6679     assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
6680 }
6681 
6682 void MarkRefsIntoClosure::do_oop(oop obj) {
  // if obj points into _span, then mark the corresponding bit in _bitMap
6684   assert(obj->is_oop(), "expected an oop");
6685   HeapWord* addr = (HeapWord*)obj;
6686   if (_span.contains(addr)) {
6687     // this should be made more efficient
6688     _bitMap->mark(addr);
6689   }
6690 }
6691 
6692 void MarkRefsIntoClosure::do_oop(oop* p)       { MarkRefsIntoClosure::do_oop_work(p); }
6693 void MarkRefsIntoClosure::do_oop(narrowOop* p) { MarkRefsIntoClosure::do_oop_work(p); }
6694 
6695 // A variant of the above, used for CMS marking verification.
6696 MarkRefsIntoVerifyClosure::MarkRefsIntoVerifyClosure(
6697   MemRegion span, CMSBitMap* verification_bm, CMSBitMap* cms_bm):
6698     _span(span),
6699     _verification_bm(verification_bm),
6700     _cms_bm(cms_bm)
6701 {
6702     assert(_ref_processor == NULL, "deliberately left NULL");
6703     assert(_verification_bm->covers(_span), "_verification_bm/_span mismatch");
6704 }
6705 
6706 void MarkRefsIntoVerifyClosure::do_oop(oop obj) {
  // if obj points into _span, then mark the corresponding bit in _verification_bm
6708   assert(obj->is_oop(), "expected an oop");
6709   HeapWord* addr = (HeapWord*)obj;
6710   if (_span.contains(addr)) {
6711     _verification_bm->mark(addr);
6712     if (!_cms_bm->isMarked(addr)) {
6713       oop(addr)->print();
6714       gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)", addr);
6715       fatal("... aborting");
6716     }
6717   }
6718 }
6719 
6720 void MarkRefsIntoVerifyClosure::do_oop(oop* p)       { MarkRefsIntoVerifyClosure::do_oop_work(p); }
6721 void MarkRefsIntoVerifyClosure::do_oop(narrowOop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); }
6722 
6723 //////////////////////////////////////////////////
6724 // MarkRefsIntoAndScanClosure
6725 //////////////////////////////////////////////////
6726 
6727 MarkRefsIntoAndScanClosure::MarkRefsIntoAndScanClosure(MemRegion span,
6728                                                        ReferenceProcessor* rp,
6729                                                        CMSBitMap* bit_map,
6730                                                        CMSBitMap* mod_union_table,
6731                                                        CMSMarkStack*  mark_stack,
6732                                                        CMSMarkStack*  revisit_stack,
6733                                                        CMSCollector* collector,
6734                                                        bool should_yield,
6735                                                        bool concurrent_precleaning):
6736   _collector(collector),
6737   _span(span),
6738   _bit_map(bit_map),
6739   _mark_stack(mark_stack),
6740   _pushAndMarkClosure(collector, span, rp, bit_map, mod_union_table,
6741                       mark_stack, revisit_stack, concurrent_precleaning),
6742   _yield(should_yield),
6743   _concurrent_precleaning(concurrent_precleaning),
6744   _freelistLock(NULL)
6745 {
6746   _ref_processor = rp;
6747   assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
6748 }
6749 
6750 // This closure is used to mark refs into the CMS generation at the
6751 // second (final) checkpoint, and to scan and transitively follow
6752 // the unmarked oops. It is also used during the concurrent precleaning
6753 // phase while scanning objects on dirty cards in the CMS generation.
6754 // The marks are made in the marking bit map and the marking stack is
6755 // used for keeping the (newly) grey objects during the scan.
6756 // The parallel version (Par_...) appears further below.
6757 void MarkRefsIntoAndScanClosure::do_oop(oop obj) {
6758   if (obj != NULL) {
6759     assert(obj->is_oop(), "expected an oop");
6760     HeapWord* addr = (HeapWord*)obj;
6761     assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
6762     assert(_collector->overflow_list_is_empty(),
6763            "overflow list should be empty");
6764     if (_span.contains(addr) &&
6765         !_bit_map->isMarked(addr)) {
6766       // mark bit map (object is now grey)
6767       _bit_map->mark(addr);
6768       // push on marking stack (stack should be empty), and drain the
6769       // stack by applying this closure to the oops in the oops popped
6770       // from the stack (i.e. blacken the grey objects)
6771       bool res = _mark_stack->push(obj);
6772       assert(res, "Should have space to push on empty stack");
6773       do {
6774         oop new_oop = _mark_stack->pop();
6775         assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
6776         assert(new_oop->is_parsable(), "Found unparsable oop");
6777         assert(_bit_map->isMarked((HeapWord*)new_oop),
6778                "only grey objects on this stack");
6779         // iterate over the oops in this oop, marking and pushing
6780         // the ones in CMS heap (i.e. in _span).
6781         new_oop->oop_iterate(&_pushAndMarkClosure);
6782         // check if it's time to yield
6783         do_yield_check();
6784       } while (!_mark_stack->isEmpty() ||
6785                (!_concurrent_precleaning && take_from_overflow_list()));
6786         // if marking stack is empty, and we are not doing this
6787         // during precleaning, then check the overflow list
6788     }
6789     assert(_mark_stack->isEmpty(), "post-condition (eager drainage)");
6790     assert(_collector->overflow_list_is_empty(),
6791            "overflow list was drained above");
6792     // We could restore evacuated mark words, if any, used for
6793     // overflow list links here because the overflow list is
6794     // provably empty here. That would reduce the maximum
6795     // size requirements for preserved_{oop,mark}_stack.
6796     // But we'll just postpone it until we are all done
6797     // so we can just stream through.
6798     if (!_concurrent_precleaning && CMSOverflowEarlyRestoration) {
6799       _collector->restore_preserved_marks_if_any();
6800       assert(_collector->no_preserved_marks(), "No preserved marks");
6801     }
6802     assert(!CMSOverflowEarlyRestoration || _collector->no_preserved_marks(),
6803            "All preserved marks should have been restored above");
6804   }
6805 }
6806 
6807 void MarkRefsIntoAndScanClosure::do_oop(oop* p)       { MarkRefsIntoAndScanClosure::do_oop_work(p); }
6808 void MarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
6809 
6810 void MarkRefsIntoAndScanClosure::do_yield_work() {
6811   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6812          "CMS thread should hold CMS token");
6813   assert_lock_strong(_freelistLock);
6814   assert_lock_strong(_bit_map->lock());
  // relinquish the freelistLock and the bitMapLock()
6816   DEBUG_ONLY(RememberKlassesChecker mux(false);)
6817   _bit_map->lock()->unlock();
6818   _freelistLock->unlock();
6819   ConcurrentMarkSweepThread::desynchronize(true);
6820   ConcurrentMarkSweepThread::acknowledge_yield_request();
6821   _collector->stopTimer();
6822   GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
6823   if (PrintCMSStatistics != 0) {
6824     _collector->incrementYields();
6825   }
6826   _collector->icms_wait();
6827 
6828   // See the comment in coordinator_yield()
6829   for (unsigned i = 0;
6830        i < CMSYieldSleepCount &&
6831        ConcurrentMarkSweepThread::should_yield() &&
6832        !CMSCollector::foregroundGCIsActive();
6833        ++i) {
6834     os::sleep(Thread::current(), 1, false);
6835     ConcurrentMarkSweepThread::acknowledge_yield_request();
6836   }
6837 
6838   ConcurrentMarkSweepThread::synchronize(true);
6839   _freelistLock->lock_without_safepoint_check();
6840   _bit_map->lock()->lock_without_safepoint_check();
6841   _collector->startTimer();
6842 }
6843 
6844 ///////////////////////////////////////////////////////////
6845 // Par_MarkRefsIntoAndScanClosure: a parallel version of
6846 //                                 MarkRefsIntoAndScanClosure
6847 ///////////////////////////////////////////////////////////
6848 Par_MarkRefsIntoAndScanClosure::Par_MarkRefsIntoAndScanClosure(
6849   CMSCollector* collector, MemRegion span, ReferenceProcessor* rp,
6850   CMSBitMap* bit_map, OopTaskQueue* work_queue, CMSMarkStack*  revisit_stack):
6851   _span(span),
6852   _bit_map(bit_map),
6853   _work_queue(work_queue),
6854   _low_water_mark(MIN2((uint)(work_queue->max_elems()/4),
6855                        (uint)(CMSWorkQueueDrainThreshold * ParallelGCThreads))),
6856   _par_pushAndMarkClosure(collector, span, rp, bit_map, work_queue,
6857                           revisit_stack)
6858 {
6859   _ref_processor = rp;
6860   assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
6861 }
6862 
6863 // This closure is used to mark refs into the CMS generation at the
6864 // second (final) checkpoint, and to scan and transitively follow
6865 // the unmarked oops. The marks are made in the marking bit map and
6866 // the work_queue is used for keeping the (newly) grey objects during
6867 // the scan phase whence they are also available for stealing by parallel
6868 // threads. Since the marking bit map is shared, updates are
6869 // synchronized (via CAS).
6870 void Par_MarkRefsIntoAndScanClosure::do_oop(oop obj) {
6871   if (obj != NULL) {
6872     // Ignore mark word because this could be an already marked oop
6873     // that may be chained at the end of the overflow list.
6874     assert(obj->is_oop(true), "expected an oop");
6875     HeapWord* addr = (HeapWord*)obj;
6876     if (_span.contains(addr) &&
6877         !_bit_map->isMarked(addr)) {
6878       // mark bit map (object will become grey):
6879       // It is possible for several threads to be
6880       // trying to "claim" this object concurrently;
6881       // the unique thread that succeeds in marking the
6882       // object first will do the subsequent push on
6883       // to the work queue (or overflow list).
6884       if (_bit_map->par_mark(addr)) {
6885         // push on work_queue (which may not be empty), and trim the
6886         // queue to an appropriate length by applying this closure to
6887         // the oops in the oops popped from the stack (i.e. blacken the
6888         // grey objects)
6889         bool res = _work_queue->push(obj);
6890         assert(res, "Low water mark should be less than capacity?");
6891         trim_queue(_low_water_mark);
6892       } // Else, another thread claimed the object
6893     }
6894   }
6895 }
6896 
6897 void Par_MarkRefsIntoAndScanClosure::do_oop(oop* p)       { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
6898 void Par_MarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
6899 
6900 // This closure is used to rescan the marked objects on the dirty cards
6901 // in the mod union table and the card table proper.
6902 size_t ScanMarkedObjectsAgainCarefullyClosure::do_object_careful_m(
6903   oop p, MemRegion mr) {
6904 
6905   size_t size = 0;
6906   HeapWord* addr = (HeapWord*)p;
6907   DEBUG_ONLY(_collector->verify_work_stacks_empty();)
6908   assert(_span.contains(addr), "we are scanning the CMS generation");
6909   // check if it's time to yield
6910   if (do_yield_check()) {
    // We yielded for some foreground stop-the-world work,
6912     // and we have been asked to abort this ongoing preclean cycle.
6913     return 0;
6914   }
6915   if (_bitMap->isMarked(addr)) {
6916     // it's marked; is it potentially uninitialized?
6917     if (p->klass_or_null() != NULL) {
6918       // If is_conc_safe is false, the object may be undergoing
6919       // change by the VM outside a safepoint.  Don't try to
6920       // scan it, but rather leave it for the remark phase.
6921       if (CMSPermGenPrecleaningEnabled &&
6922           (!p->is_conc_safe() || !p->is_parsable())) {
6923         // Signal precleaning to redirty the card since
6924         // the klass pointer is already installed.
6925         assert(size == 0, "Initial value");
6926       } else {
6927         assert(p->is_parsable(), "must be parsable.");
6928         // an initialized object; ignore mark word in verification below
6929         // since we are running concurrent with mutators
6930         assert(p->is_oop(true), "should be an oop");
6931         if (p->is_objArray()) {
6932           // objArrays are precisely marked; restrict scanning
6933           // to dirty cards only.
6934           size = CompactibleFreeListSpace::adjustObjectSize(
6935                    p->oop_iterate(_scanningClosure, mr));
6936         } else {
6937           // A non-array may have been imprecisely marked; we need
6938           // to scan object in its entirety.
6939           size = CompactibleFreeListSpace::adjustObjectSize(
6940                    p->oop_iterate(_scanningClosure));
6941         }
6942         #ifdef DEBUG
6943           size_t direct_size =
6944             CompactibleFreeListSpace::adjustObjectSize(p->size());
6945           assert(size == direct_size, "Inconsistency in size");
6946           assert(size >= 3, "Necessary for Printezis marks to work");
6947           if (!_bitMap->isMarked(addr+1)) {
6948             _bitMap->verifyNoOneBitsInRange(addr+2, addr+size);
6949           } else {
6950             _bitMap->verifyNoOneBitsInRange(addr+2, addr+size-1);
6951             assert(_bitMap->isMarked(addr+size-1),
6952                    "inconsistent Printezis mark");
6953           }
6954         #endif // DEBUG
6955       }
6956     } else {
      // an uninitialized object
6958       assert(_bitMap->isMarked(addr+1), "missing Printezis mark?");
6959       HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
6960       size = pointer_delta(nextOneAddr + 1, addr);
6961       assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
6962              "alignment problem");
6963       // Note that pre-cleaning needn't redirty the card. OopDesc::set_klass()
6964       // will dirty the card when the klass pointer is installed in the
6965       // object (signalling the completion of initialization).
6966     }
6967   } else {
6968     // Either a not yet marked object or an uninitialized object
6969     if (p->klass_or_null() == NULL || !p->is_parsable()) {
6970       // An uninitialized object, skip to the next card, since
6971       // we may not be able to read its P-bits yet.
6972       assert(size == 0, "Initial value");
6973     } else {
6974       // An object not (yet) reached by marking: we merely need to
6975       // compute its size so as to go look at the next block.
6976       assert(p->is_oop(true), "should be an oop");
6977       size = CompactibleFreeListSpace::adjustObjectSize(p->size());
6978     }
6979   }
6980   DEBUG_ONLY(_collector->verify_work_stacks_empty();)
6981   return size;
6982 }
6983 
6984 void ScanMarkedObjectsAgainCarefullyClosure::do_yield_work() {
6985   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6986          "CMS thread should hold CMS token");
6987   assert_lock_strong(_freelistLock);
6988   assert_lock_strong(_bitMap->lock());
6989   DEBUG_ONLY(RememberKlassesChecker mux(false);)
  // relinquish the freelistLock and the bitMapLock()
6991   _bitMap->lock()->unlock();
6992   _freelistLock->unlock();
6993   ConcurrentMarkSweepThread::desynchronize(true);
6994   ConcurrentMarkSweepThread::acknowledge_yield_request();
6995   _collector->stopTimer();
6996   GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
6997   if (PrintCMSStatistics != 0) {
6998     _collector->incrementYields();
6999   }
7000   _collector->icms_wait();
7001 
7002   // See the comment in coordinator_yield()
7003   for (unsigned i = 0; i < CMSYieldSleepCount &&
7004                    ConcurrentMarkSweepThread::should_yield() &&
7005                    !CMSCollector::foregroundGCIsActive(); ++i) {
7006     os::sleep(Thread::current(), 1, false);
7007     ConcurrentMarkSweepThread::acknowledge_yield_request();
7008   }
7009 
7010   ConcurrentMarkSweepThread::synchronize(true);
7011   _freelistLock->lock_without_safepoint_check();
7012   _bitMap->lock()->lock_without_safepoint_check();
7013   _collector->startTimer();
7014 }
7015 
7016 
7017 //////////////////////////////////////////////////////////////////
7018 // SurvivorSpacePrecleanClosure
7019 //////////////////////////////////////////////////////////////////
7020 // This (single-threaded) closure is used to preclean the oops in
7021 // the survivor spaces.
7022 size_t SurvivorSpacePrecleanClosure::do_object_careful(oop p) {
7023 
7024   HeapWord* addr = (HeapWord*)p;
7025   DEBUG_ONLY(_collector->verify_work_stacks_empty();)
7026   assert(!_span.contains(addr), "we are scanning the survivor spaces");
7027   assert(p->klass_or_null() != NULL, "object should be initializd");
7028   assert(p->is_parsable(), "must be parsable.");
7029   // an initialized object; ignore mark word in verification below
7030   // since we are running concurrent with mutators
7031   assert(p->is_oop(true), "should be an oop");
7032   // Note that we do not yield while we iterate over
7033   // the interior oops of p, pushing the relevant ones
7034   // on our marking stack.
7035   size_t size = p->oop_iterate(_scanning_closure);
7036   do_yield_check();
7037   // Observe that below, we do not abandon the preclean
7038   // phase as soon as we should; rather we empty the
7039   // marking stack before returning. This is to satisfy
7040   // some existing assertions. In general, it may be a
7041   // good idea to abort immediately and complete the marking
7042   // from the grey objects at a later time.
7043   while (!_mark_stack->isEmpty()) {
7044     oop new_oop = _mark_stack->pop();
7045     assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
7046     assert(new_oop->is_parsable(), "Found unparsable oop");
7047     assert(_bit_map->isMarked((HeapWord*)new_oop),
7048            "only grey objects on this stack");
7049     // iterate over the oops in this oop, marking and pushing
7050     // the ones in CMS heap (i.e. in _span).
7051     new_oop->oop_iterate(_scanning_closure);
7052     // check if it's time to yield
7053     do_yield_check();
7054   }
7055   unsigned int after_count =
7056     GenCollectedHeap::heap()->total_collections();
7057   bool abort = (_before_count != after_count) ||
7058                _collector->should_abort_preclean();
7059   return abort ? 0 : size;
7060 }
7061 
7062 void SurvivorSpacePrecleanClosure::do_yield_work() {
7063   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
7064          "CMS thread should hold CMS token");
7065   assert_lock_strong(_bit_map->lock());
7066   DEBUG_ONLY(RememberKlassesChecker smx(false);)
7067   // Relinquish the bit map lock
7068   _bit_map->lock()->unlock();
7069   ConcurrentMarkSweepThread::desynchronize(true);
7070   ConcurrentMarkSweepThread::acknowledge_yield_request();
7071   _collector->stopTimer();
7072   GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
7073   if (PrintCMSStatistics != 0) {
7074     _collector->incrementYields();
7075   }
7076   _collector->icms_wait();
7077 
7078   // See the comment in coordinator_yield()
7079   for (unsigned i = 0; i < CMSYieldSleepCount &&
7080                        ConcurrentMarkSweepThread::should_yield() &&
7081                        !CMSCollector::foregroundGCIsActive(); ++i) {
7082     os::sleep(Thread::current(), 1, false);
7083     ConcurrentMarkSweepThread::acknowledge_yield_request();
7084   }
7085 
7086   ConcurrentMarkSweepThread::synchronize(true);
7087   _bit_map->lock()->lock_without_safepoint_check();
7088   _collector->startTimer();
7089 }
7090 
7091 // This closure is used to rescan the marked objects on the dirty cards
7092 // in the mod union table and the card table proper. In the parallel
7093 // case, although the bitMap is shared, we do a single read so the
7094 // isMarked() query is "safe".
7095 bool ScanMarkedObjectsAgainClosure::do_object_bm(oop p, MemRegion mr) {
7096   // Ignore mark word because we are running concurrent with mutators
7097   assert(p->is_oop_or_null(true), "expected an oop or null");
7098   HeapWord* addr = (HeapWord*)p;
7099   assert(_span.contains(addr), "we are scanning the CMS generation");
7100   bool is_obj_array = false;
7101   #ifdef DEBUG
7102     if (!_parallel) {
7103       assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
7104       assert(_collector->overflow_list_is_empty(),
7105              "overflow list should be empty");
7106 
7107     }
7108   #endif // DEBUG
7109   if (_bit_map->isMarked(addr)) {
7110     // Obj arrays are precisely marked, non-arrays are not;
7111     // so we scan objArrays precisely and non-arrays in their
7112     // entirety.
7113     if (p->is_objArray()) {
7114       is_obj_array = true;
7115       if (_parallel) {
7116         p->oop_iterate(_par_scan_closure, mr);
7117       } else {
7118         p->oop_iterate(_scan_closure, mr);
7119       }
7120     } else {
7121       if (_parallel) {
7122         p->oop_iterate(_par_scan_closure);
7123       } else {
7124         p->oop_iterate(_scan_closure);
7125       }
7126     }
7127   }
7128   #ifdef DEBUG
7129     if (!_parallel) {
7130       assert(_mark_stack->isEmpty(), "post-condition (eager drainage)");
7131       assert(_collector->overflow_list_is_empty(),
7132              "overflow list should be empty");
7133 
7134     }
7135   #endif // DEBUG
7136   return is_obj_array;
7137 }
7138 
7139 MarkFromRootsClosure::MarkFromRootsClosure(CMSCollector* collector,
7140                         MemRegion span,
7141                         CMSBitMap* bitMap, CMSMarkStack*  markStack,
7142                         CMSMarkStack*  revisitStack,
7143                         bool should_yield, bool verifying):
7144   _collector(collector),
7145   _span(span),
7146   _bitMap(bitMap),
7147   _mut(&collector->_modUnionTable),
7148   _markStack(markStack),
7149   _revisitStack(revisitStack),
7150   _yield(should_yield),
7151   _skipBits(0)
7152 {
7153   assert(_markStack->isEmpty(), "stack should be empty");
7154   _finger = _bitMap->startWord();
7155   _threshold = _finger;
7156   assert(_collector->_restart_addr == NULL, "Sanity check");
7157   assert(_span.contains(_finger), "Out of bounds _finger?");
7158   DEBUG_ONLY(_verifying = verifying;)
7159 }
7160 
7161 void MarkFromRootsClosure::reset(HeapWord* addr) {
7162   assert(_markStack->isEmpty(), "would cause duplicates on stack");
7163   assert(_span.contains(addr), "Out of bounds _finger?");
7164   _finger = addr;
7165   _threshold = (HeapWord*)round_to(
7166                  (intptr_t)_finger, CardTableModRefBS::card_size);
7167 }
7168 
7169 // Should revisit to see if this should be restructured for
7170 // greater efficiency.
7171 bool MarkFromRootsClosure::do_bit(size_t offset) {
7172   if (_skipBits > 0) {
7173     _skipBits--;
7174     return true;
7175   }
7176   // convert offset into a HeapWord*
7177   HeapWord* addr = _bitMap->startWord() + offset;
7178   assert(_bitMap->endWord() && addr < _bitMap->endWord(),
7179          "address out of range");
7180   assert(_bitMap->isMarked(addr), "tautology");
7181   if (_bitMap->isMarked(addr+1)) {
7182     // this is an allocated but not yet initialized object
7183     assert(_skipBits == 0, "tautology");
7184     _skipBits = 2;  // skip next two marked bits ("Printezis-marks")
7185     oop p = oop(addr);
7186     if (p->klass_or_null() == NULL || !p->is_parsable()) {
7187       DEBUG_ONLY(if (!_verifying) {)
7188         // We re-dirty the cards on which this object lies and increase
7189         // the _threshold so that we'll come back to scan this object
7190         // during the preclean or remark phase. (CMSCleanOnEnter)
7191         if (CMSCleanOnEnter) {
7192           size_t sz = _collector->block_size_using_printezis_bits(addr);
7193           HeapWord* end_card_addr   = (HeapWord*)round_to(
7194                                          (intptr_t)(addr+sz), CardTableModRefBS::card_size);
7195           MemRegion redirty_range = MemRegion(addr, end_card_addr);
7196           assert(!redirty_range.is_empty(), "Arithmetical tautology");
7197           // Bump _threshold to end_card_addr; note that
7198           // _threshold cannot possibly exceed end_card_addr, anyhow.
7199           // This prevents future clearing of the card as the scan proceeds
7200           // to the right.
7201           assert(_threshold <= end_card_addr,
7202                  "Because we are just scanning into this object");
7203           if (_threshold < end_card_addr) {
7204             _threshold = end_card_addr;
7205           }
7206           if (p->klass_or_null() != NULL) {
7207             // Redirty the range of cards...
7208             _mut->mark_range(redirty_range);
7209           } // ...else the setting of klass will dirty the card anyway.
7210         }
7211       DEBUG_ONLY(})
7212       return true;
7213     }
7214   }
7215   scanOopsInOop(addr);
7216   return true;
7217 }
7218 
7219 // We take a break if we've been at this for a while,
7220 // so as to avoid monopolizing the locks involved.
7221 void MarkFromRootsClosure::do_yield_work() {
7222   // First give up the locks, then yield, then re-lock
7223   // We should probably use a constructor/destructor idiom to
7224   // do this unlock/lock or modify the MutexUnlocker class to
7225   // serve our purpose. XXX
7226   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
7227          "CMS thread should hold CMS token");
7228   assert_lock_strong(_bitMap->lock());
7229   DEBUG_ONLY(RememberKlassesChecker mux(false);)
7230   _bitMap->lock()->unlock();
7231   ConcurrentMarkSweepThread::desynchronize(true);
7232   ConcurrentMarkSweepThread::acknowledge_yield_request();
7233   _collector->stopTimer();
7234   GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
7235   if (PrintCMSStatistics != 0) {
7236     _collector->incrementYields();
7237   }
7238   _collector->icms_wait();
7239 
7240   // See the comment in coordinator_yield()
7241   for (unsigned i = 0; i < CMSYieldSleepCount &&
7242                        ConcurrentMarkSweepThread::should_yield() &&
7243                        !CMSCollector::foregroundGCIsActive(); ++i) {
7244     os::sleep(Thread::current(), 1, false);
7245     ConcurrentMarkSweepThread::acknowledge_yield_request();
7246   }
7247 
7248   ConcurrentMarkSweepThread::synchronize(true);
7249   _bitMap->lock()->lock_without_safepoint_check();
7250   _collector->startTimer();
7251 }
7252 
7253 void MarkFromRootsClosure::scanOopsInOop(HeapWord* ptr) {
7254   assert(_bitMap->isMarked(ptr), "expected bit to be set");
7255   assert(_markStack->isEmpty(),
7256          "should drain stack to limit stack usage");
7257   // convert ptr to an oop preparatory to scanning
7258   oop obj = oop(ptr);
7259   // Ignore mark word in verification below, since we
7260   // may be running concurrent with mutators.
7261   assert(obj->is_oop(true), "should be an oop");
7262   assert(_finger <= ptr, "_finger runneth ahead");
7263   // advance the finger to right end of this object
7264   _finger = ptr + obj->size();
7265   assert(_finger > ptr, "we just incremented it above");
7266   // On large heaps, it may take us some time to get through
7267   // the marking phase (especially if running iCMS). During
7268   // this time it's possible that a lot of mutations have
7269   // accumulated in the card table and the mod union table --
7270   // these mutation records are redundant until we have
7271   // actually traced into the corresponding card.
7272   // Here, we check whether advancing the finger would make
7273   // us cross into a new card, and if so clear corresponding
7274   // cards in the MUT (preclean them in the card-table in the
7275   // future).
7276 
7277   DEBUG_ONLY(if (!_verifying) {)
7278     // The clean-on-enter optimization is disabled by default,
7279     // until we fix 6178663.
7280     if (CMSCleanOnEnter && (_finger > _threshold)) {
7281       // [_threshold, _finger) represents the interval
      // of cards to be cleared in MUT (or precleaned in card table).
7283       // The set of cards to be cleared is all those that overlap
7284       // with the interval [_threshold, _finger); note that
7285       // _threshold is always kept card-aligned but _finger isn't
7286       // always card-aligned.
7287       HeapWord* old_threshold = _threshold;
7288       assert(old_threshold == (HeapWord*)round_to(
7289               (intptr_t)old_threshold, CardTableModRefBS::card_size),
7290              "_threshold should always be card-aligned");
7291       _threshold = (HeapWord*)round_to(
7292                      (intptr_t)_finger, CardTableModRefBS::card_size);
7293       MemRegion mr(old_threshold, _threshold);
7294       assert(!mr.is_empty(), "Control point invariant");
7295       assert(_span.contains(mr), "Should clear within span");
7296       // XXX When _finger crosses from old gen into perm gen
7297       // we may be doing unnecessary cleaning; do better in the
7298       // future by detecting that condition and clearing fewer
7299       // MUT/CT entries.
7300       _mut->clear_range(mr);
7301     }
7302   DEBUG_ONLY(})
7303   // Note: the finger doesn't advance while we drain
7304   // the stack below.
7305   PushOrMarkClosure pushOrMarkClosure(_collector,
7306                                       _span, _bitMap, _markStack,
7307                                       _revisitStack,
7308                                       _finger, this);
7309   bool res = _markStack->push(obj);
7310   assert(res, "Empty non-zero size stack should have space for single push");
7311   while (!_markStack->isEmpty()) {
7312     oop new_oop = _markStack->pop();
7313     // Skip verifying header mark word below because we are
7314     // running concurrent with mutators.
7315     assert(new_oop->is_oop(true), "Oops! expected to pop an oop");
7316     // now scan this oop's oops
7317     new_oop->oop_iterate(&pushOrMarkClosure);
7318     do_yield_check();
7319   }
7320   assert(_markStack->isEmpty(), "tautology, emphasizing post-condition");
7321 }
7322 
7323 Par_MarkFromRootsClosure::Par_MarkFromRootsClosure(CMSConcMarkingTask* task,
7324                        CMSCollector* collector, MemRegion span,
7325                        CMSBitMap* bit_map,
7326                        OopTaskQueue* work_queue,
7327                        CMSMarkStack*  overflow_stack,
7328                        CMSMarkStack*  revisit_stack,
7329                        bool should_yield):
7330   _collector(collector),
7331   _whole_span(collector->_span),
7332   _span(span),
7333   _bit_map(bit_map),
7334   _mut(&collector->_modUnionTable),
7335   _work_queue(work_queue),
7336   _overflow_stack(overflow_stack),
7337   _revisit_stack(revisit_stack),
7338   _yield(should_yield),
7339   _skip_bits(0),
7340   _task(task)
7341 {
7342   assert(_work_queue->size() == 0, "work_queue should be empty");
7343   _finger = span.start();
7344   _threshold = _finger;     // XXX Defer clear-on-enter optimization for now
7345   assert(_span.contains(_finger), "Out of bounds _finger?");
7346 }
7347 
7348 // Should revisit to see if this should be restructured for
7349 // greater efficiency.
7350 bool Par_MarkFromRootsClosure::do_bit(size_t offset) {
7351   if (_skip_bits > 0) {
7352     _skip_bits--;
7353     return true;
7354   }
7355   // convert offset into a HeapWord*
7356   HeapWord* addr = _bit_map->startWord() + offset;
7357   assert(_bit_map->endWord() && addr < _bit_map->endWord(),
7358          "address out of range");
7359   assert(_bit_map->isMarked(addr), "tautology");
7360   if (_bit_map->isMarked(addr+1)) {
7361     // this is an allocated object that might not yet be initialized
7362     assert(_skip_bits == 0, "tautology");
7363     _skip_bits = 2;  // skip next two marked bits ("Printezis-marks")
7364     oop p = oop(addr);
7365     if (p->klass_or_null() == NULL || !p->is_parsable()) {
7366       // in the case of Clean-on-Enter optimization, redirty card
      // and avoid clearing card by increasing the threshold.
7368       return true;
7369     }
7370   }
7371   scan_oops_in_oop(addr);
7372   return true;
7373 }
7374 
7375 void Par_MarkFromRootsClosure::scan_oops_in_oop(HeapWord* ptr) {
7376   assert(_bit_map->isMarked(ptr), "expected bit to be set");
7377   // Should we assert that our work queue is empty or
7378   // below some drain limit?
7379   assert(_work_queue->size() == 0,
7380          "should drain stack to limit stack usage");
7381   // convert ptr to an oop preparatory to scanning
7382   oop obj = oop(ptr);
7383   // Ignore mark word in verification below, since we
7384   // may be running concurrent with mutators.
7385   assert(obj->is_oop(true), "should be an oop");
7386   assert(_finger <= ptr, "_finger runneth ahead");
7387   // advance the finger to right end of this object
7388   _finger = ptr + obj->size();
7389   assert(_finger > ptr, "we just incremented it above");
7390   // On large heaps, it may take us some time to get through
7391   // the marking phase (especially if running iCMS). During
7392   // this time it's possible that a lot of mutations have
7393   // accumulated in the card table and the mod union table --
7394   // these mutation records are redundant until we have
7395   // actually traced into the corresponding card.
7396   // Here, we check whether advancing the finger would make
7397   // us cross into a new card, and if so clear corresponding
7398   // cards in the MUT (preclean them in the card-table in the
7399   // future).
7400 
7401   // The clean-on-enter optimization is disabled by default,
7402   // until we fix 6178663.
7403   if (CMSCleanOnEnter && (_finger > _threshold)) {
7404     // [_threshold, _finger) represents the interval
    // of cards to be cleared in MUT (or precleaned in card table).
7406     // The set of cards to be cleared is all those that overlap
7407     // with the interval [_threshold, _finger); note that
7408     // _threshold is always kept card-aligned but _finger isn't
7409     // always card-aligned.
7410     HeapWord* old_threshold = _threshold;
7411     assert(old_threshold == (HeapWord*)round_to(
7412             (intptr_t)old_threshold, CardTableModRefBS::card_size),
7413            "_threshold should always be card-aligned");
7414     _threshold = (HeapWord*)round_to(
7415                    (intptr_t)_finger, CardTableModRefBS::card_size);
7416     MemRegion mr(old_threshold, _threshold);
7417     assert(!mr.is_empty(), "Control point invariant");
7418     assert(_span.contains(mr), "Should clear within span"); // _whole_span ??
7419     // XXX When _finger crosses from old gen into perm gen
7420     // we may be doing unnecessary cleaning; do better in the
7421     // future by detecting that condition and clearing fewer
7422     // MUT/CT entries.
7423     _mut->clear_range(mr);
7424   }
7425 
7426   // Note: the local finger doesn't advance while we drain
7427   // the stack below, but the global finger sure can and will.
7428   HeapWord** gfa = _task->global_finger_addr();
7429   Par_PushOrMarkClosure pushOrMarkClosure(_collector,
7430                                       _span, _bit_map,
7431                                       _work_queue,
7432                                       _overflow_stack,
7433                                       _revisit_stack,
7434                                       _finger,
7435                                       gfa, this);
7436   bool res = _work_queue->push(obj);   // overflow could occur here
7437   assert(res, "Will hold once we use workqueues");
7438   while (true) {
7439     oop new_oop;
7440     if (!_work_queue->pop_local(new_oop)) {
7441       // We emptied our work_queue; check if there's stuff that can
7442       // be gotten from the overflow stack.
7443       if (CMSConcMarkingTask::get_work_from_overflow_stack(
7444             _overflow_stack, _work_queue)) {
7445         do_yield_check();
7446         continue;
7447       } else {  // done
7448         break;
7449       }
7450     }
7451     // Skip verifying header mark word below because we are
7452     // running concurrent with mutators.
7453     assert(new_oop->is_oop(true), "Oops! expected to pop an oop");
7454     // now scan this oop's oops
7455     new_oop->oop_iterate(&pushOrMarkClosure);
7456     do_yield_check();
7457   }
7458   assert(_work_queue->size() == 0, "tautology, emphasizing post-condition");
7459 }
7460 
7461 // Yield in response to a request from VM Thread or
7462 // from mutators.
7463 void Par_MarkFromRootsClosure::do_yield_work() {
7464   assert(_task != NULL, "sanity");
7465   _task->yield();
7466 }
7467 
7468 // A variant of the above used for verifying CMS marking work.
7469 MarkFromRootsVerifyClosure::MarkFromRootsVerifyClosure(CMSCollector* collector,
7470                         MemRegion span,
7471                         CMSBitMap* verification_bm, CMSBitMap* cms_bm,
7472                         CMSMarkStack*  mark_stack):
7473   _collector(collector),
7474   _span(span),
7475   _verification_bm(verification_bm),
7476   _cms_bm(cms_bm),
7477   _mark_stack(mark_stack),
7478   _pam_verify_closure(collector, span, verification_bm, cms_bm,
7479                       mark_stack)
7480 {
7481   assert(_mark_stack->isEmpty(), "stack should be empty");
7482   _finger = _verification_bm->startWord();
7483   assert(_collector->_restart_addr == NULL, "Sanity check");
7484   assert(_span.contains(_finger), "Out of bounds _finger?");
7485 }
7486 
7487 void MarkFromRootsVerifyClosure::reset(HeapWord* addr) {
7488   assert(_mark_stack->isEmpty(), "would cause duplicates on stack");
7489   assert(_span.contains(addr), "Out of bounds _finger?");
7490   _finger = addr;
7491 }
7492 
7493 // Should revisit to see if this should be restructured for
7494 // greater efficiency.
7495 bool MarkFromRootsVerifyClosure::do_bit(size_t offset) {
7496   // convert offset into a HeapWord*
7497   HeapWord* addr = _verification_bm->startWord() + offset;
7498   assert(_verification_bm->endWord() && addr < _verification_bm->endWord(),
7499          "address out of range");
7500   assert(_verification_bm->isMarked(addr), "tautology");
7501   assert(_cms_bm->isMarked(addr), "tautology");
7502 
7503   assert(_mark_stack->isEmpty(),
7504          "should drain stack to limit stack usage");
7505   // convert addr to an oop preparatory to scanning
7506   oop obj = oop(addr);
7507   assert(obj->is_oop(), "should be an oop");
7508   assert(_finger <= addr, "_finger runneth ahead");
7509   // advance the finger to right end of this object
7510   _finger = addr + obj->size();
7511   assert(_finger > addr, "we just incremented it above");
7512   // Note: the finger doesn't advance while we drain
7513   // the stack below.
7514   bool res = _mark_stack->push(obj);
7515   assert(res, "Empty non-zero size stack should have space for single push");
7516   while (!_mark_stack->isEmpty()) {
7517     oop new_oop = _mark_stack->pop();
7518     assert(new_oop->is_oop(), "Oops! expected to pop an oop");
7519     // now scan this oop's oops
7520     new_oop->oop_iterate(&_pam_verify_closure);
7521   }
7522   assert(_mark_stack->isEmpty(), "tautology, emphasizing post-condition");
7523   return true;
7524 }
7525 
7526 PushAndMarkVerifyClosure::PushAndMarkVerifyClosure(
7527   CMSCollector* collector, MemRegion span,
7528   CMSBitMap* verification_bm, CMSBitMap* cms_bm,
7529   CMSMarkStack*  mark_stack):
7530   OopClosure(collector->ref_processor()),
7531   _collector(collector),
7532   _span(span),
7533   _verification_bm(verification_bm),
7534   _cms_bm(cms_bm),
7535   _mark_stack(mark_stack)
7536 { }
7537 
7538 void PushAndMarkVerifyClosure::do_oop(oop* p)       { PushAndMarkVerifyClosure::do_oop_work(p); }
7539 void PushAndMarkVerifyClosure::do_oop(narrowOop* p) { PushAndMarkVerifyClosure::do_oop_work(p); }
7540 
7541 // Upon stack overflow, we discard (part of) the stack,
7542 // remembering the least address amongst those discarded
// in CMSCollector's _restart_addr.
7544 void PushAndMarkVerifyClosure::handle_stack_overflow(HeapWord* lost) {
7545   // Remember the least grey address discarded
7546   HeapWord* ra = (HeapWord*)_mark_stack->least_value(lost);
7547   _collector->lower_restart_addr(ra);
7548   _mark_stack->reset();  // discard stack contents
7549   _mark_stack->expand(); // expand the stack if possible
7550 }
7551 
7552 void PushAndMarkVerifyClosure::do_oop(oop obj) {
7553   assert(obj->is_oop_or_null(), "expected an oop or NULL");
7554   HeapWord* addr = (HeapWord*)obj;
7555   if (_span.contains(addr) && !_verification_bm->isMarked(addr)) {
7556     // Oop lies in _span and isn't yet grey or black
7557     _verification_bm->mark(addr);            // now grey
7558     if (!_cms_bm->isMarked(addr)) {
7559       oop(addr)->print();
7560       gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)",
7561                              addr);
7562       fatal("... aborting");
7563     }
7564 
7565     if (!_mark_stack->push(obj)) { // stack overflow
7566       if (PrintCMSStatistics != 0) {
7567         gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
7568                                SIZE_FORMAT, _mark_stack->capacity());
7569       }
7570       assert(_mark_stack->isFull(), "Else push should have succeeded");
7571       handle_stack_overflow(addr);
7572     }
7573     // anything including and to the right of _finger
7574     // will be scanned as we iterate over the remainder of the
7575     // bit map
7576   }
7577 }
7578 
7579 PushOrMarkClosure::PushOrMarkClosure(CMSCollector* collector,
7580                      MemRegion span,
7581                      CMSBitMap* bitMap, CMSMarkStack*  markStack,
7582                      CMSMarkStack*  revisitStack,
7583                      HeapWord* finger, MarkFromRootsClosure* parent) :
7584   KlassRememberingOopClosure(collector, collector->ref_processor(), revisitStack),
7585   _span(span),
7586   _bitMap(bitMap),
7587   _markStack(markStack),
7588   _finger(finger),
7589   _parent(parent)
7590 { }
7591 
7592 Par_PushOrMarkClosure::Par_PushOrMarkClosure(CMSCollector* collector,
7593                      MemRegion span,
7594                      CMSBitMap* bit_map,
7595                      OopTaskQueue* work_queue,
7596                      CMSMarkStack*  overflow_stack,
7597                      CMSMarkStack*  revisit_stack,
7598                      HeapWord* finger,
7599                      HeapWord** global_finger_addr,
7600                      Par_MarkFromRootsClosure* parent) :
7601   Par_KlassRememberingOopClosure(collector,
7602                             collector->ref_processor(),
7603                             revisit_stack),
7604   _whole_span(collector->_span),
7605   _span(span),
7606   _bit_map(bit_map),
7607   _work_queue(work_queue),
7608   _overflow_stack(overflow_stack),
7609   _finger(finger),
7610   _global_finger_addr(global_finger_addr),
7611   _parent(parent)
7612 { }
7613 
7614 // Assumes thread-safe access by callers, who are
7615 // responsible for mutual exclusion.
7616 void CMSCollector::lower_restart_addr(HeapWord* low) {
7617   assert(_span.contains(low), "Out of bounds addr");
7618   if (_restart_addr == NULL) {
7619     _restart_addr = low;
7620   } else {
7621     _restart_addr = MIN2(_restart_addr, low);
7622   }
7623 }
7624 
7625 // Upon stack overflow, we discard (part of) the stack,
7626 // remembering the least address amongst those discarded
// in CMSCollector's _restart_addr.
7628 void PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
7629   // Remember the least grey address discarded
7630   HeapWord* ra = (HeapWord*)_markStack->least_value(lost);
7631   _collector->lower_restart_addr(ra);
7632   _markStack->reset();  // discard stack contents
7633   _markStack->expand(); // expand the stack if possible
7634 }
7635 
7636 // Upon stack overflow, we discard (part of) the stack,
7637 // remembering the least address amongst those discarded
// in CMSCollector's _restart_addr.
7639 void Par_PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
7640   // We need to do this under a mutex to prevent other
7641   // workers from interfering with the work done below.
7642   MutexLockerEx ml(_overflow_stack->par_lock(),
7643                    Mutex::_no_safepoint_check_flag);
7644   // Remember the least grey address discarded
7645   HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost);
7646   _collector->lower_restart_addr(ra);
7647   _overflow_stack->reset();  // discard stack contents
7648   _overflow_stack->expand(); // expand the stack if possible
7649 }
7650 
7651 void PushOrMarkClosure::do_oop(oop obj) {
  // Ignore mark word because we are running concurrently with mutators.
7653   assert(obj->is_oop_or_null(true), "expected an oop or NULL");
7654   HeapWord* addr = (HeapWord*)obj;
7655   if (_span.contains(addr) && !_bitMap->isMarked(addr)) {
7656     // Oop lies in _span and isn't yet grey or black
7657     _bitMap->mark(addr);            // now grey
7658     if (addr < _finger) {
7659       // the bit map iteration has already either passed, or
7660       // sampled, this bit in the bit map; we'll need to
7661       // use the marking stack to scan this oop's oops.
7662       bool simulate_overflow = false;
7663       NOT_PRODUCT(
7664         if (CMSMarkStackOverflowALot &&
7665             _collector->simulate_overflow()) {
7666           // simulate a stack overflow
7667           simulate_overflow = true;
7668         }
7669       )
7670       if (simulate_overflow || !_markStack->push(obj)) { // stack overflow
7671         if (PrintCMSStatistics != 0) {
7672           gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
7673                                  SIZE_FORMAT, _markStack->capacity());
7674         }
7675         assert(simulate_overflow || _markStack->isFull(), "Else push should have succeeded");
7676         handle_stack_overflow(addr);
7677       }
7678     }
7679     // anything including and to the right of _finger
7680     // will be scanned as we iterate over the remainder of the
7681     // bit map
7682     do_yield_check();
7683   }
7684 }
7685 
7686 void PushOrMarkClosure::do_oop(oop* p)       { PushOrMarkClosure::do_oop_work(p); }
7687 void PushOrMarkClosure::do_oop(narrowOop* p) { PushOrMarkClosure::do_oop_work(p); }
7688 
7689 void Par_PushOrMarkClosure::do_oop(oop obj) {
  // Ignore mark word because we are running concurrently with mutators.
7691   assert(obj->is_oop_or_null(true), "expected an oop or NULL");
7692   HeapWord* addr = (HeapWord*)obj;
7693   if (_whole_span.contains(addr) && !_bit_map->isMarked(addr)) {
    // Oop lies in _whole_span and isn't yet grey or black
7695     // We read the global_finger (volatile read) strictly after marking oop
7696     bool res = _bit_map->par_mark(addr);    // now grey
7697     volatile HeapWord** gfa = (volatile HeapWord**)_global_finger_addr;
7698     // Should we push this marked oop on our stack?
7699     // -- if someone else marked it, nothing to do
7700     // -- if target oop is above global finger nothing to do
7701     // -- if target oop is in chunk and above local finger
7702     //      then nothing to do
7703     // -- else push on work queue
7704     if (   !res       // someone else marked it, they will deal with it
7705         || (addr >= *gfa)  // will be scanned in a later task
7706         || (_span.contains(addr) && addr >= _finger)) { // later in this chunk
7707       return;
7708     }
7709     // the bit map iteration has already either passed, or
7710     // sampled, this bit in the bit map; we'll need to
7711     // use the marking stack to scan this oop's oops.
7712     bool simulate_overflow = false;
7713     NOT_PRODUCT(
7714       if (CMSMarkStackOverflowALot &&
7715           _collector->simulate_overflow()) {
7716         // simulate a stack overflow
7717         simulate_overflow = true;
7718       }
7719     )
7720     if (simulate_overflow ||
7721         !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
7722       // stack overflow
7723       if (PrintCMSStatistics != 0) {
7724         gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
7725                                SIZE_FORMAT, _overflow_stack->capacity());
7726       }
7727       // We cannot assert that the overflow stack is full because
7728       // it may have been emptied since.
7729       assert(simulate_overflow ||
7730              _work_queue->size() == _work_queue->max_elems(),
7731             "Else push should have succeeded");
7732       handle_stack_overflow(addr);
7733     }
7734     do_yield_check();
7735   }
7736 }
7737 
7738 void Par_PushOrMarkClosure::do_oop(oop* p)       { Par_PushOrMarkClosure::do_oop_work(p); }
7739 void Par_PushOrMarkClosure::do_oop(narrowOop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
7740 
7741 KlassRememberingOopClosure::KlassRememberingOopClosure(CMSCollector* collector,
7742                                              ReferenceProcessor* rp,
7743                                              CMSMarkStack* revisit_stack) :
7744   OopClosure(rp),
7745   _collector(collector),
7746   _revisit_stack(revisit_stack),
7747   _should_remember_klasses(collector->should_unload_classes()) {}
7748 
7749 PushAndMarkClosure::PushAndMarkClosure(CMSCollector* collector,
7750                                        MemRegion span,
7751                                        ReferenceProcessor* rp,
7752                                        CMSBitMap* bit_map,
7753                                        CMSBitMap* mod_union_table,
7754                                        CMSMarkStack*  mark_stack,
7755                                        CMSMarkStack*  revisit_stack,
7756                                        bool           concurrent_precleaning):
7757   KlassRememberingOopClosure(collector, rp, revisit_stack),
7758   _span(span),
7759   _bit_map(bit_map),
7760   _mod_union_table(mod_union_table),
7761   _mark_stack(mark_stack),
7762   _concurrent_precleaning(concurrent_precleaning)
7763 {
7764   assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
7765 }
7766 
7767 // Grey object rescan during pre-cleaning and second checkpoint phases --
7768 // the non-parallel version (the parallel version appears further below.)
7769 void PushAndMarkClosure::do_oop(oop obj) {
  // Ignore the mark word in the assert below: during concurrent
  // precleaning the object monitor may be locked, and during the
  // checkpoint phases the object may already have been reached by a
  // different path and may be at the end of the global overflow list
  // (so the mark word may be NULL).
7775   assert(obj->is_oop_or_null(true /* ignore mark word */),
7776          "expected an oop or NULL");
7777   HeapWord* addr = (HeapWord*)obj;
7778   // Check if oop points into the CMS generation
7779   // and is not marked
7780   if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
7781     // a white object ...
7782     _bit_map->mark(addr);         // ... now grey
7783     // push on the marking stack (grey set)
7784     bool simulate_overflow = false;
7785     NOT_PRODUCT(
7786       if (CMSMarkStackOverflowALot &&
7787           _collector->simulate_overflow()) {
7788         // simulate a stack overflow
7789         simulate_overflow = true;
7790       }
7791     )
7792     if (simulate_overflow || !_mark_stack->push(obj)) {
7793       if (_concurrent_precleaning) {
7794          // During precleaning we can just dirty the appropriate card(s)
7795          // in the mod union table, thus ensuring that the object remains
         // in the grey set, and continue. In the case of object arrays
7797          // we need to dirty all of the cards that the object spans,
7798          // since the rescan of object arrays will be limited to the
7799          // dirty cards.
         // Note that no one can be interfering with us in this action
7801          // of dirtying the mod union table, so no locking or atomics
7802          // are required.
7803          if (obj->is_objArray()) {
7804            size_t sz = obj->size();
7805            HeapWord* end_card_addr = (HeapWord*)round_to(
7806                                         (intptr_t)(addr+sz), CardTableModRefBS::card_size);
7807            MemRegion redirty_range = MemRegion(addr, end_card_addr);
7808            assert(!redirty_range.is_empty(), "Arithmetical tautology");
7809            _mod_union_table->mark_range(redirty_range);
7810          } else {
7811            _mod_union_table->mark(addr);
7812          }
7813          _collector->_ser_pmc_preclean_ovflw++;
7814       } else {
7815          // During the remark phase, we need to remember this oop
7816          // in the overflow list.
7817          _collector->push_on_overflow_list(obj);
7818          _collector->_ser_pmc_remark_ovflw++;
7819       }
7820     }
7821   }
7822 }
7823 
7824 Par_PushAndMarkClosure::Par_PushAndMarkClosure(CMSCollector* collector,
7825                                                MemRegion span,
7826                                                ReferenceProcessor* rp,
7827                                                CMSBitMap* bit_map,
7828                                                OopTaskQueue* work_queue,
7829                                                CMSMarkStack* revisit_stack):
7830   Par_KlassRememberingOopClosure(collector, rp, revisit_stack),
7831   _span(span),
7832   _bit_map(bit_map),
7833   _work_queue(work_queue)
7834 {
7835   assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
7836 }
7837 
7838 void PushAndMarkClosure::do_oop(oop* p)       { PushAndMarkClosure::do_oop_work(p); }
7839 void PushAndMarkClosure::do_oop(narrowOop* p) { PushAndMarkClosure::do_oop_work(p); }
7840 
7841 // Grey object rescan during second checkpoint phase --
7842 // the parallel version.
7843 void Par_PushAndMarkClosure::do_oop(oop obj) {
7844   // In the assert below, we ignore the mark word because
7845   // this oop may point to an already visited object that is
7846   // on the overflow stack (in which case the mark word has
7847   // been hijacked for chaining into the overflow stack --
7848   // if this is the last object in the overflow stack then
7849   // its mark word will be NULL). Because this object may
7850   // have been subsequently popped off the global overflow
7851   // stack, and the mark word possibly restored to the prototypical
  // value, by the time we get to examine this failing assert in
7853   // the debugger, is_oop_or_null(false) may subsequently start
7854   // to hold.
7855   assert(obj->is_oop_or_null(true),
7856          "expected an oop or NULL");
7857   HeapWord* addr = (HeapWord*)obj;
7858   // Check if oop points into the CMS generation
7859   // and is not marked
7860   if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
7861     // a white object ...
7862     // If we manage to "claim" the object, by being the
7863     // first thread to mark it, then we push it on our
7864     // marking stack
7865     if (_bit_map->par_mark(addr)) {     // ... now grey
7866       // push on work queue (grey set)
7867       bool simulate_overflow = false;
7868       NOT_PRODUCT(
7869         if (CMSMarkStackOverflowALot &&
7870             _collector->par_simulate_overflow()) {
7871           // simulate a stack overflow
7872           simulate_overflow = true;
7873         }
7874       )
7875       if (simulate_overflow || !_work_queue->push(obj)) {
7876         _collector->par_push_on_overflow_list(obj);
7877         _collector->_par_pmc_remark_ovflw++; //  imprecise OK: no need to CAS
7878       }
7879     } // Else, some other thread got there first
7880   }
7881 }
7882 
7883 void Par_PushAndMarkClosure::do_oop(oop* p)       { Par_PushAndMarkClosure::do_oop_work(p); }
7884 void Par_PushAndMarkClosure::do_oop(narrowOop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
7885 
7886 void PushAndMarkClosure::remember_mdo(DataLayout* v) {
7887   // TBD
7888 }
7889 
7890 void Par_PushAndMarkClosure::remember_mdo(DataLayout* v) {
7891   // TBD
7892 }
7893 
7894 void CMSPrecleanRefsYieldClosure::do_yield_work() {
7895   DEBUG_ONLY(RememberKlassesChecker mux(false);)
7896   Mutex* bml = _collector->bitMapLock();
7897   assert_lock_strong(bml);
7898   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
7899          "CMS thread should hold CMS token");
7900 
7901   bml->unlock();
7902   ConcurrentMarkSweepThread::desynchronize(true);
7903 
7904   ConcurrentMarkSweepThread::acknowledge_yield_request();
7905 
7906   _collector->stopTimer();
7907   GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
7908   if (PrintCMSStatistics != 0) {
7909     _collector->incrementYields();
7910   }
7911   _collector->icms_wait();
7912 
7913   // See the comment in coordinator_yield()
7914   for (unsigned i = 0; i < CMSYieldSleepCount &&
7915                        ConcurrentMarkSweepThread::should_yield() &&
7916                        !CMSCollector::foregroundGCIsActive(); ++i) {
7917     os::sleep(Thread::current(), 1, false);
7918     ConcurrentMarkSweepThread::acknowledge_yield_request();
7919   }
7920 
7921   ConcurrentMarkSweepThread::synchronize(true);
7922   bml->lock();
7923 
7924   _collector->startTimer();
7925 }
7926 
7927 bool CMSPrecleanRefsYieldClosure::should_return() {
7928   if (ConcurrentMarkSweepThread::should_yield()) {
7929     do_yield_work();
7930   }
7931   return _collector->foregroundGCIsActive();
7932 }
7933 
7934 void MarkFromDirtyCardsClosure::do_MemRegion(MemRegion mr) {
7935   assert(((size_t)mr.start())%CardTableModRefBS::card_size_in_words == 0,
7936          "mr should be aligned to start at a card boundary");
7937   // We'd like to assert:
7938   // assert(mr.word_size()%CardTableModRefBS::card_size_in_words == 0,
7939   //        "mr should be a range of cards");
7940   // However, that would be too strong in one case -- the last
7941   // partition ends at _unallocated_block which, in general, can be
7942   // an arbitrary boundary, not necessarily card aligned.
7943   if (PrintCMSStatistics != 0) {
7944     _num_dirty_cards +=
7945          mr.word_size()/CardTableModRefBS::card_size_in_words;
7946   }
7947   _space->object_iterate_mem(mr, &_scan_cl);
7948 }
7949 
7950 SweepClosure::SweepClosure(CMSCollector* collector,
7951                            ConcurrentMarkSweepGeneration* g,
7952                            CMSBitMap* bitMap, bool should_yield) :
7953   _collector(collector),
7954   _g(g),
7955   _sp(g->cmsSpace()),
7956   _limit(_sp->sweep_limit()),
7957   _freelistLock(_sp->freelistLock()),
7958   _bitMap(bitMap),
7959   _yield(should_yield),
7960   _inFreeRange(false),           // No free range at beginning of sweep
7961   _freeRangeInFreeLists(false),  // No free range at beginning of sweep
7962   _lastFreeRangeCoalesced(false),
7963   _freeFinger(g->used_region().start())
7964 {
7965   NOT_PRODUCT(
7966     _numObjectsFreed = 0;
7967     _numWordsFreed   = 0;
7968     _numObjectsLive = 0;
7969     _numWordsLive = 0;
7970     _numObjectsAlreadyFree = 0;
7971     _numWordsAlreadyFree = 0;
7972     _last_fc = NULL;
7973 
7974     _sp->initializeIndexedFreeListArrayReturnedBytes();
7975     _sp->dictionary()->initialize_dict_returned_bytes();
7976   )
7977   assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
7978          "sweep _limit out of bounds");
7979   if (CMSTraceSweeper) {
7980     gclog_or_tty->print_cr("\n====================\nStarting new sweep with limit " PTR_FORMAT,
7981                         _limit);
7982   }
7983 }
7984 
7985 void SweepClosure::print_on(outputStream* st) const {
7986   tty->print_cr("_sp = [" PTR_FORMAT "," PTR_FORMAT ")",
7987                 _sp->bottom(), _sp->end());
7988   tty->print_cr("_limit = " PTR_FORMAT, _limit);
7989   tty->print_cr("_freeFinger = " PTR_FORMAT, _freeFinger);
7990   NOT_PRODUCT(tty->print_cr("_last_fc = " PTR_FORMAT, _last_fc);)
7991   tty->print_cr("_inFreeRange = %d, _freeRangeInFreeLists = %d, _lastFreeRangeCoalesced = %d",
7992                 _inFreeRange, _freeRangeInFreeLists, _lastFreeRangeCoalesced);
7993 }
7994 
7995 #ifndef PRODUCT
7996 // Assertion checking only:  no useful work in product mode --
7997 // however, if any of the flags below become product flags,
7998 // you may need to review this code to see if it needs to be
7999 // enabled in product mode.
8000 SweepClosure::~SweepClosure() {
8001   assert_lock_strong(_freelistLock);
8002   assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
8003          "sweep _limit out of bounds");
8004   if (inFreeRange()) {
8005     warning("inFreeRange() should have been reset; dumping state of SweepClosure");
8006     print();
8007     ShouldNotReachHere();
8008   }
8009   if (Verbose && PrintGC) {
8010     gclog_or_tty->print("Collected "SIZE_FORMAT" objects, " SIZE_FORMAT " bytes",
8011                         _numObjectsFreed, _numWordsFreed*sizeof(HeapWord));
8012     gclog_or_tty->print_cr("\nLive "SIZE_FORMAT" objects,  "
8013                            SIZE_FORMAT" bytes  "
8014       "Already free "SIZE_FORMAT" objects, "SIZE_FORMAT" bytes",
8015       _numObjectsLive, _numWordsLive*sizeof(HeapWord),
8016       _numObjectsAlreadyFree, _numWordsAlreadyFree*sizeof(HeapWord));
8017     size_t totalBytes = (_numWordsFreed + _numWordsLive + _numWordsAlreadyFree)
8018                         * sizeof(HeapWord);
8019     gclog_or_tty->print_cr("Total sweep: "SIZE_FORMAT" bytes", totalBytes);
8020 
8021     if (PrintCMSStatistics && CMSVerifyReturnedBytes) {
8022       size_t indexListReturnedBytes = _sp->sumIndexedFreeListArrayReturnedBytes();
8023       size_t dict_returned_bytes = _sp->dictionary()->sum_dict_returned_bytes();
8024       size_t returned_bytes = indexListReturnedBytes + dict_returned_bytes;
8025       gclog_or_tty->print("Returned "SIZE_FORMAT" bytes", returned_bytes);
8026       gclog_or_tty->print("   Indexed List Returned "SIZE_FORMAT" bytes",
8027         indexListReturnedBytes);
8028       gclog_or_tty->print_cr("        Dictionary Returned "SIZE_FORMAT" bytes",
8029         dict_returned_bytes);
8030     }
8031   }
8032   if (CMSTraceSweeper) {
8033     gclog_or_tty->print_cr("end of sweep with _limit = " PTR_FORMAT "\n================",
8034                            _limit);
8035   }
8036 }
8037 #endif  // PRODUCT
8038 
8039 void SweepClosure::initialize_free_range(HeapWord* freeFinger,
8040     bool freeRangeInFreeLists) {
8041   if (CMSTraceSweeper) {
8042     gclog_or_tty->print("---- Start free range at 0x%x with free block (%d)\n",
8043                freeFinger, freeRangeInFreeLists);
8044   }
8045   assert(!inFreeRange(), "Trampling existing free range");
8046   set_inFreeRange(true);
8047   set_lastFreeRangeCoalesced(false);
8048 
8049   set_freeFinger(freeFinger);
8050   set_freeRangeInFreeLists(freeRangeInFreeLists);
8051   if (CMSTestInFreeList) {
8052     if (freeRangeInFreeLists) {
8053       FreeChunk* fc = (FreeChunk*) freeFinger;
8054       assert(fc->is_free(), "A chunk on the free list should be free.");
8055       assert(fc->size() > 0, "Free range should have a size");
8056       assert(_sp->verify_chunk_in_free_list(fc), "Chunk is not in free lists");
8057     }
8058   }
8059 }
8060 
8061 // Note that the sweeper runs concurrently with mutators. Thus,
8062 // it is possible for direct allocation in this generation to happen
8063 // in the middle of the sweep. Note that the sweeper also coalesces
8064 // contiguous free blocks. Thus, unless the sweeper and the allocator
// synchronize appropriately, freshly allocated blocks may get swept up.
8066 // This is accomplished by the sweeper locking the free lists while
8067 // it is sweeping. Thus blocks that are determined to be free are
8068 // indeed free. There is however one additional complication:
8069 // blocks that have been allocated since the final checkpoint and
8070 // mark, will not have been marked and so would be treated as
8071 // unreachable and swept up. To prevent this, the allocator marks
8072 // the bit map when allocating during the sweep phase. This leads,
8073 // however, to a further complication -- objects may have been allocated
8074 // but not yet initialized -- in the sense that the header isn't yet
// installed. The sweeper cannot then determine the size of the block
8076 // in order to skip over it. To deal with this case, we use a technique
8077 // (due to Printezis) to encode such uninitialized block sizes in the
8078 // bit map. Since the bit map uses a bit per every HeapWord, but the
8079 // CMS generation has a minimum object size of 3 HeapWords, it follows
8080 // that "normal marks" won't be adjacent in the bit map (there will
8081 // always be at least two 0 bits between successive 1 bits). We make use
8082 // of these "unused" bits to represent uninitialized blocks -- the bit
8083 // corresponding to the start of the uninitialized object and the next
8084 // bit are both set. Finally, a 1 bit marks the end of the object that
8085 // started with the two consecutive 1 bits to indicate its potentially
8086 // uninitialized state.
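//
// An illustrative example of the encoding (not additional code): for an
// uninitialized block starting at bit offset k whose eventual size is
// s >= 3 words, the allocator sets bits k and k+1 (the "Printezis
// marks") and bit k+s-1 (the end mark):
//
//   offset:  k   k+1  k+2  ...  k+s-2  k+s-1
//   bit:     1    1    0   ...    0      1
//
// do_live_chunk() below recovers s without touching the (possibly
// missing) header as
//   size = pointer_delta(getNextMarkedWordAddress(addr + 2) + 1, addr);
// which is exactly why the minimum object size of 3 words is needed.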
8087 
8088 size_t SweepClosure::do_blk_careful(HeapWord* addr) {
8089   FreeChunk* fc = (FreeChunk*)addr;
8090   size_t res;
8091 
8092   // Check if we are done sweeping. Below we check "addr >= _limit" rather
8093   // than "addr == _limit" because although _limit was a block boundary when
8094   // we started the sweep, it may no longer be one because heap expansion
8095   // may have caused us to coalesce the block ending at the address _limit
8096   // with a newly expanded chunk (this happens when _limit was set to the
8097   // previous _end of the space), so we may have stepped past _limit:
8098   // see the following Zeno-like trail of CRs 6977970, 7008136, 7042740.
8099   if (addr >= _limit) { // we have swept up to or past the limit: finish up
8100     assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
8101            "sweep _limit out of bounds");
8102     assert(addr < _sp->end(), "addr out of bounds");
8103     // Flush any free range we might be holding as a single
8104     // coalesced chunk to the appropriate free list.
8105     if (inFreeRange()) {
8106       assert(freeFinger() >= _sp->bottom() && freeFinger() < _limit,
8107              err_msg("freeFinger() " PTR_FORMAT" is out-of-bounds", freeFinger()));
8108       flush_cur_free_chunk(freeFinger(),
8109                            pointer_delta(addr, freeFinger()));
8110       if (CMSTraceSweeper) {
8111         gclog_or_tty->print("Sweep: last chunk: ");
8112         gclog_or_tty->print("put_free_blk 0x%x ("SIZE_FORMAT") "
8113                    "[coalesced:"SIZE_FORMAT"]\n",
8114                    freeFinger(), pointer_delta(addr, freeFinger()),
8115                    lastFreeRangeCoalesced());
8116       }
8117     }
8118 
8119     // help the iterator loop finish
8120     return pointer_delta(_sp->end(), addr);
8121   }
8122 
8123   assert(addr < _limit, "sweep invariant");
8124   // check if we should yield
8125   do_yield_check(addr);
8126   if (fc->is_free()) {
8127     // Chunk that is already free
8128     res = fc->size();
8129     do_already_free_chunk(fc);
8130     debug_only(_sp->verifyFreeLists());
8131     // If we flush the chunk at hand in lookahead_and_flush()
8132     // and it's coalesced with a preceding chunk, then the
8133     // process of "mangling" the payload of the coalesced block
8134     // will cause erasure of the size information from the
8135     // (erstwhile) header of all the coalesced blocks but the
8136     // first, so the first disjunct in the assert will not hold
8137     // in that specific case (in which case the second disjunct
8138     // will hold).
8139     assert(res == fc->size() || ((HeapWord*)fc) + res >= _limit,
8140            "Otherwise the size info doesn't change at this step");
8141     NOT_PRODUCT(
8142       _numObjectsAlreadyFree++;
8143       _numWordsAlreadyFree += res;
8144     )
8145     NOT_PRODUCT(_last_fc = fc;)
8146   } else if (!_bitMap->isMarked(addr)) {
8147     // Chunk is fresh garbage
8148     res = do_garbage_chunk(fc);
8149     debug_only(_sp->verifyFreeLists());
8150     NOT_PRODUCT(
8151       _numObjectsFreed++;
8152       _numWordsFreed += res;
8153     )
8154   } else {
8155     // Chunk that is alive.
8156     res = do_live_chunk(fc);
8157     debug_only(_sp->verifyFreeLists());
8158     NOT_PRODUCT(
8159         _numObjectsLive++;
8160         _numWordsLive += res;
8161     )
8162   }
8163   return res;
8164 }
8165 
// For the smart allocation, record the following
8167 //  split deaths - a free chunk is removed from its free list because
8168 //      it is being split into two or more chunks.
8169 //  split birth - a free chunk is being added to its free list because
8170 //      a larger free chunk has been split and resulted in this free chunk.
8171 //  coal death - a free chunk is being removed from its free list because
8172 //      it is being coalesced into a large free chunk.
8173 //  coal birth - a free chunk is being added to its free list because
//      it was created when two or more free chunks were coalesced into
8175 //      this free chunk.
8176 //
8177 // These statistics are used to determine the desired number of free
8178 // chunks of a given size.  The desired number is chosen to be relative
8179 // to the end of a CMS sweep.  The desired number at the end of a sweep
8180 // is the
8181 //      count-at-end-of-previous-sweep (an amount that was enough)
8182 //              - count-at-beginning-of-current-sweep  (the excess)
8183 //              + split-births  (gains in this size during interval)
8184 //              - split-deaths  (demands on this size during interval)
8185 // where the interval is from the end of one sweep to the end of the
8186 // next.
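//
//      For instance (illustrative numbers only): if a given size had
//      100 free chunks at the end of the previous sweep, 40 remain at
//      the start of this one, and the interval saw 25 split-births and
//      10 split-deaths, then the desired count at the end of this sweep
//      is 100 - 40 + 25 - 10 = 75.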
8187 //
8188 // When sweeping the sweeper maintains an accumulated chunk which is
8189 // the chunk that is made up of chunks that have been coalesced.  That
8190 // will be termed the left-hand chunk.  A new chunk of garbage that
8191 // is being considered for coalescing will be referred to as the
8192 // right-hand chunk.
8193 //
8194 // When making a decision on whether to coalesce a right-hand chunk with
8195 // the current left-hand chunk, the current count vs. the desired count
8196 // of the left-hand chunk is considered.  Also if the right-hand chunk
8197 // is near the large chunk at the end of the heap (see
8198 // ConcurrentMarkSweepGeneration::isNearLargestChunk()), then the
8199 // left-hand chunk is coalesced.
8200 //
8201 // When making a decision about whether to split a chunk, the desired count
8202 // vs. the current count of the candidate to be split is also considered.
8203 // If the candidate is underpopulated (currently fewer chunks than desired)
8204 // a chunk of an overpopulated (currently more chunks than desired) size may
8205 // be chosen.  The "hint" associated with a free list, if non-null, points
8206 // to a free list which may be overpopulated.
8207 //
8208 
8209 void SweepClosure::do_already_free_chunk(FreeChunk* fc) {
8210   const size_t size = fc->size();
8211   // Chunks that cannot be coalesced are not in the
8212   // free lists.
8213   if (CMSTestInFreeList && !fc->cantCoalesce()) {
8214     assert(_sp->verify_chunk_in_free_list(fc),
8215       "free chunk should be in free lists");
8216   }
8217   // a chunk that is already free, should not have been
8218   // marked in the bit map
8219   HeapWord* const addr = (HeapWord*) fc;
8220   assert(!_bitMap->isMarked(addr), "free chunk should be unmarked");
8221   // Verify that the bit map has no bits marked between
8222   // addr and purported end of this block.
8223   _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
8224 
8225   // Some chunks cannot be coalesced under any circumstances.
8226   // See the definition of cantCoalesce().
8227   if (!fc->cantCoalesce()) {
8228     // This chunk can potentially be coalesced.
8229     if (_sp->adaptive_freelists()) {
8230       // All the work is done in
8231       do_post_free_or_garbage_chunk(fc, size);
8232     } else {  // Not adaptive free lists
8233       // this is a free chunk that can potentially be coalesced by the sweeper;
8234       if (!inFreeRange()) {
8235         // if the next chunk is a free block that can't be coalesced
8236         // it doesn't make sense to remove this chunk from the free lists
8237         FreeChunk* nextChunk = (FreeChunk*)(addr + size);
8238         assert((HeapWord*)nextChunk <= _sp->end(), "Chunk size out of bounds?");
8239         if ((HeapWord*)nextChunk < _sp->end() &&     // There is another free chunk to the right ...
8240             nextChunk->is_free()               &&     // ... which is free...
8241             nextChunk->cantCoalesce()) {             // ... but can't be coalesced
8242           // nothing to do
8243         } else {
8244           // Potentially the start of a new free range:
8245           // Don't eagerly remove it from the free lists.
8246           // No need to remove it if it will just be put
8247           // back again.  (Also from a pragmatic point of view
8248           // if it is a free block in a region that is beyond
8249           // any allocated blocks, an assertion will fail)
8250           // Remember the start of a free run.
8251           initialize_free_range(addr, true);
8252           // end - can coalesce with next chunk
8253         }
8254       } else {
8255         // the midst of a free range, we are coalescing
8256         print_free_block_coalesced(fc);
8257         if (CMSTraceSweeper) {
8258           gclog_or_tty->print("  -- pick up free block 0x%x (%d)\n", fc, size);
8259         }
8260         // remove it from the free lists
8261         _sp->removeFreeChunkFromFreeLists(fc);
8262         set_lastFreeRangeCoalesced(true);
8263         // If the chunk is being coalesced and the current free range is
8264         // in the free lists, remove the current free range so that it
8265         // will be returned to the free lists in its entirety - all
8266         // the coalesced pieces included.
8267         if (freeRangeInFreeLists()) {
8268           FreeChunk* ffc = (FreeChunk*) freeFinger();
8269           assert(ffc->size() == pointer_delta(addr, freeFinger()),
8270             "Size of free range is inconsistent with chunk size.");
8271           if (CMSTestInFreeList) {
8272             assert(_sp->verify_chunk_in_free_list(ffc),
8273               "free range is not in free lists");
8274           }
8275           _sp->removeFreeChunkFromFreeLists(ffc);
8276           set_freeRangeInFreeLists(false);
8277         }
8278       }
8279     }
8280     // Note that if the chunk is not coalescable (the else arm
8281     // below), we unconditionally flush, without needing to do
8282     // a "lookahead," as we do below.
8283     if (inFreeRange()) lookahead_and_flush(fc, size);
8284   } else {
8285     // Code path common to both original and adaptive free lists.
8286 
    // can't coalesce with previous block; this should be treated
8288     // as the end of a free run if any
8289     if (inFreeRange()) {
8290       // we kicked some butt; time to pick up the garbage
8291       assert(freeFinger() < addr, "freeFinger points too high");
8292       flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
8293     }
8294     // else, nothing to do, just continue
8295   }
8296 }
8297 
8298 size_t SweepClosure::do_garbage_chunk(FreeChunk* fc) {
8299   // This is a chunk of garbage.  It is not in any free list.
8300   // Add it to a free list or let it possibly be coalesced into
8301   // a larger chunk.
8302   HeapWord* const addr = (HeapWord*) fc;
8303   const size_t size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
8304 
8305   if (_sp->adaptive_freelists()) {
8306     // Verify that the bit map has no bits marked between
8307     // addr and purported end of just dead object.
8308     _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
8309 
8310     do_post_free_or_garbage_chunk(fc, size);
8311   } else {
8312     if (!inFreeRange()) {
8313       // start of a new free range
8314       assert(size > 0, "A free range should have a size");
8315       initialize_free_range(addr, false);
8316     } else {
8317       // this will be swept up when we hit the end of the
8318       // free range
8319       if (CMSTraceSweeper) {
8320         gclog_or_tty->print("  -- pick up garbage 0x%x (%d) \n", fc, size);
8321       }
8322       // If the chunk is being coalesced and the current free range is
8323       // in the free lists, remove the current free range so that it
8324       // will be returned to the free lists in its entirety - all
8325       // the coalesced pieces included.
8326       if (freeRangeInFreeLists()) {
8327         FreeChunk* ffc = (FreeChunk*)freeFinger();
8328         assert(ffc->size() == pointer_delta(addr, freeFinger()),
8329           "Size of free range is inconsistent with chunk size.");
8330         if (CMSTestInFreeList) {
8331           assert(_sp->verify_chunk_in_free_list(ffc),
8332             "free range is not in free lists");
8333         }
8334         _sp->removeFreeChunkFromFreeLists(ffc);
8335         set_freeRangeInFreeLists(false);
8336       }
8337       set_lastFreeRangeCoalesced(true);
8338     }
8339     // this will be swept up when we hit the end of the free range
8340 
8341     // Verify that the bit map has no bits marked between
8342     // addr and purported end of just dead object.
8343     _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
8344   }
8345   assert(_limit >= addr + size,
8346          "A freshly garbage chunk can't possibly straddle over _limit");
8347   if (inFreeRange()) lookahead_and_flush(fc, size);
8348   return size;
8349 }
8350 
8351 size_t SweepClosure::do_live_chunk(FreeChunk* fc) {
8352   HeapWord* addr = (HeapWord*) fc;
8353   // The sweeper has just found a live object. Return any accumulated
8354   // left hand chunk to the free lists.
8355   if (inFreeRange()) {
8356     assert(freeFinger() < addr, "freeFinger points too high");
8357     flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
8358   }
8359 
8360   // This object is live: we'd normally expect this to be
8361   // an oop, and like to assert the following:
8362   // assert(oop(addr)->is_oop(), "live block should be an oop");
8363   // However, as we commented above, this may be an object whose
8364   // header hasn't yet been initialized.
8365   size_t size;
8366   assert(_bitMap->isMarked(addr), "Tautology for this control point");
8367   if (_bitMap->isMarked(addr + 1)) {
8368     // Determine the size from the bit map, rather than trying to
8369     // compute it from the object header.
8370     HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
8371     size = pointer_delta(nextOneAddr + 1, addr);
8372     assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
8373            "alignment problem");
8374 
8375 #ifdef DEBUG
8376       if (oop(addr)->klass_or_null() != NULL &&
8377           (   !_collector->should_unload_classes()
8378            || (oop(addr)->is_parsable()) &&
8379                oop(addr)->is_conc_safe())) {
        // Ignore mark word because we are running concurrently with mutators
8381         assert(oop(addr)->is_oop(true), "live block should be an oop");
8382         // is_conc_safe is checked before performing this assertion
8383         // because an object that is not is_conc_safe may yet have
8384         // the return from size() correct.
8385         assert(size ==
8386                CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size()),
8387                "P-mark and computed size do not agree");
8388       }
8389 #endif
8390 
8391   } else {
8392     // This should be an initialized object that's alive.
8393     assert(oop(addr)->klass_or_null() != NULL &&
8394            (!_collector->should_unload_classes()
8395             || oop(addr)->is_parsable()),
8396            "Should be an initialized object");
8397     // Note that there are objects used during class redefinition,
8398     // e.g. merge_cp in VM_RedefineClasses::merge_cp_and_rewrite(),
8399     // which are discarded with their is_conc_safe state still
    // false.  These objects may be floating garbage so may be
    // seen here.  If they are floating garbage their size
    // should be attainable from their klass.  Do not assume
    // that is_conc_safe() is true for oop(addr).
    // Ignore mark word because we are running concurrently with mutators
8405     assert(oop(addr)->is_oop(true), "live block should be an oop");
8406     // Verify that the bit map has no bits marked between
8407     // addr and purported end of this block.
8408     size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
8409     assert(size >= 3, "Necessary for Printezis marks to work");
8410     assert(!_bitMap->isMarked(addr+1), "Tautology for this control point");
8411     DEBUG_ONLY(_bitMap->verifyNoOneBitsInRange(addr+2, addr+size);)
8412   }
8413   return size;
8414 }
8415 
8416 void SweepClosure::do_post_free_or_garbage_chunk(FreeChunk* fc,
8417                                                  size_t chunkSize) {
8418   // do_post_free_or_garbage_chunk() should only be called in the case
8419   // of the adaptive free list allocator.
8420   const bool fcInFreeLists = fc->is_free();
8421   assert(_sp->adaptive_freelists(), "Should only be used in this case.");
8422   assert((HeapWord*)fc <= _limit, "sweep invariant");
8423   if (CMSTestInFreeList && fcInFreeLists) {
8424     assert(_sp->verify_chunk_in_free_list(fc), "free chunk is not in free lists");
8425   }
8426 
8427   if (CMSTraceSweeper) {
8428     gclog_or_tty->print_cr("  -- pick up another chunk at 0x%x (%d)", fc, chunkSize);
8429   }
8430 
8431   HeapWord* const fc_addr = (HeapWord*) fc;
8432 
8433   bool coalesce;
8434   const size_t left  = pointer_delta(fc_addr, freeFinger());
8435   const size_t right = chunkSize;
8436   switch (FLSCoalescePolicy) {
    // numeric value forms a coalescing aggressiveness metric
8438     case 0:  { // never coalesce
8439       coalesce = false;
8440       break;
8441     }
8442     case 1: { // coalesce if left & right chunks on overpopulated lists
8443       coalesce = _sp->coalOverPopulated(left) &&
8444                  _sp->coalOverPopulated(right);
8445       break;
8446     }
8447     case 2: { // coalesce if left chunk on overpopulated list (default)
8448       coalesce = _sp->coalOverPopulated(left);
8449       break;
8450     }
8451     case 3: { // coalesce if left OR right chunk on overpopulated list
8452       coalesce = _sp->coalOverPopulated(left) ||
8453                  _sp->coalOverPopulated(right);
8454       break;
8455     }
8456     case 4: { // always coalesce
8457       coalesce = true;
8458       break;
8459     }
8460     default:
8461      ShouldNotReachHere();
8462   }
8463 
8464   // Should the current free range be coalesced?
8465   // If the chunk is in a free range and either we decided to coalesce above
8466   // or the chunk is near the large block at the end of the heap
8467   // (isNearLargestChunk() returns true), then coalesce this chunk.
8468   const bool doCoalesce = inFreeRange()
8469                           && (coalesce || _g->isNearLargestChunk(fc_addr));
8470   if (doCoalesce) {
8471     // Coalesce the current free range on the left with the new
8472     // chunk on the right.  If either is on a free list,
8473     // it must be removed from the list and stashed in the closure.
8474     if (freeRangeInFreeLists()) {
8475       FreeChunk* const ffc = (FreeChunk*)freeFinger();
8476       assert(ffc->size() == pointer_delta(fc_addr, freeFinger()),
8477         "Size of free range is inconsistent with chunk size.");
8478       if (CMSTestInFreeList) {
8479         assert(_sp->verify_chunk_in_free_list(ffc),
8480           "Chunk is not in free lists");
8481       }
8482       _sp->coalDeath(ffc->size());
8483       _sp->removeFreeChunkFromFreeLists(ffc);
8484       set_freeRangeInFreeLists(false);
8485     }
8486     if (fcInFreeLists) {
8487       _sp->coalDeath(chunkSize);
8488       assert(fc->size() == chunkSize,
8489         "The chunk has the wrong size or is not in the free lists");
8490       _sp->removeFreeChunkFromFreeLists(fc);
8491     }
8492     set_lastFreeRangeCoalesced(true);
8493     print_free_block_coalesced(fc);
8494   } else {  // not in a free range and/or should not coalesce
8495     // Return the current free range and start a new one.
8496     if (inFreeRange()) {
8497       // In a free range but cannot coalesce with the right hand chunk.
8498       // Put the current free range into the free lists.
8499       flush_cur_free_chunk(freeFinger(),
8500                            pointer_delta(fc_addr, freeFinger()));
8501     }
8502     // Set up for new free range.  Pass along whether the right hand
8503     // chunk is in the free lists.
8504     initialize_free_range((HeapWord*)fc, fcInFreeLists);
8505   }
8506 }
8507 
8508 // Lookahead flush:
8509 // If we are tracking a free range, and this is the last chunk that
8510 // we'll look at because its end crosses past _limit, we'll preemptively
8511 // flush it along with any free range we may be holding on to. Note that
8512 // this can be the case only for an already free or freshly garbage
8513 // chunk. If this block is an object, it can never straddle
8514 // over _limit. The "straddling" occurs when _limit is set at
8515 // the previous end of the space when this cycle started, and
8516 // a subsequent heap expansion caused the previously co-terminal
8517 // free block to be coalesced with the newly expanded portion,
8518 // thus rendering _limit a non-block-boundary making it dangerous
8519 // for the sweeper to step over and examine.
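// Concretely (an illustrative scenario, not new behavior): say _limit
// was set to the old end of the space, E, when this sweep began, and a
// free block used to end exactly at E. If the heap then expands by m
// words, that block is coalesced with the new [E, E+m) portion, so the
// chunk under examination now ends at E+m > _limit; we must flush here
// instead of stepping over what is no longer a block boundary.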
8520 void SweepClosure::lookahead_and_flush(FreeChunk* fc, size_t chunk_size) {
8521   assert(inFreeRange(), "Should only be called if currently in a free range.");
8522   HeapWord* const eob = ((HeapWord*)fc) + chunk_size;
8523   assert(_sp->used_region().contains(eob - 1),
8524          err_msg("eob = " PTR_FORMAT " out of bounds wrt _sp = [" PTR_FORMAT "," PTR_FORMAT ")"
8525                  " when examining fc = " PTR_FORMAT "(" SIZE_FORMAT ")",
                 eob, _sp->bottom(), _sp->end(), fc, chunk_size));
8527   if (eob >= _limit) {
8528     assert(eob == _limit || fc->is_free(), "Only a free chunk should allow us to cross over the limit");
8529     if (CMSTraceSweeper) {
8530       gclog_or_tty->print_cr("_limit " PTR_FORMAT " reached or crossed by block "
8531                              "[" PTR_FORMAT "," PTR_FORMAT ") in space "
8532                              "[" PTR_FORMAT "," PTR_FORMAT ")",
8533                              _limit, fc, eob, _sp->bottom(), _sp->end());
8534     }
8535     // Return the storage we are tracking back into the free lists.
8536     if (CMSTraceSweeper) {
8537       gclog_or_tty->print_cr("Flushing ... ");
8538     }
8539     assert(freeFinger() < eob, "Error");
8540     flush_cur_free_chunk( freeFinger(), pointer_delta(eob, freeFinger()));
8541   }
8542 }
8543 
8544 void SweepClosure::flush_cur_free_chunk(HeapWord* chunk, size_t size) {
8545   assert(inFreeRange(), "Should only be called if currently in a free range.");
8546   assert(size > 0,
8547     "A zero sized chunk cannot be added to the free lists.");
8548   if (!freeRangeInFreeLists()) {
8549     if (CMSTestInFreeList) {
8550       FreeChunk* fc = (FreeChunk*) chunk;
8551       fc->set_size(size);
8552       assert(!_sp->verify_chunk_in_free_list(fc),
8553         "chunk should not be in free lists yet");
8554     }
8555     if (CMSTraceSweeper) {
8556       gclog_or_tty->print_cr(" -- add free block 0x%x (%d) to free lists",
8557                     chunk, size);
8558     }
8559     // A new free range is going to be starting.  The current
8560     // free range has not been added to the free lists yet or
8561     // was removed so add it back.
8562     // If the current free range was coalesced, then the death
8563     // of the free range was recorded.  Record a birth now.
8564     if (lastFreeRangeCoalesced()) {
8565       _sp->coalBirth(size);
8566     }
8567     _sp->addChunkAndRepairOffsetTable(chunk, size,
8568             lastFreeRangeCoalesced());
8569   } else if (CMSTraceSweeper) {
8570     gclog_or_tty->print_cr("Already in free list: nothing to flush");
8571   }
8572   set_inFreeRange(false);
8573   set_freeRangeInFreeLists(false);
8574 }
8575 
8576 // We take a break if we've been at this for a while,
8577 // so as to avoid monopolizing the locks involved.
8578 void SweepClosure::do_yield_work(HeapWord* addr) {
8579   // Return current free chunk being used for coalescing (if any)
8580   // to the appropriate freelist.  After yielding, the next
8581   // free block encountered will start a coalescing range of
8582   // free blocks.  If the next free block is adjacent to the
8583   // chunk just flushed, they will need to wait for the next
8584   // sweep to be coalesced.
8585   if (inFreeRange()) {
8586     flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
8587   }
8588 
8589   // First give up the locks, then yield, then re-lock.
8590   // We should probably use a constructor/destructor idiom to
8591   // do this unlock/lock or modify the MutexUnlocker class to
8592   // serve our purpose. XXX
8593   assert_lock_strong(_bitMap->lock());
8594   assert_lock_strong(_freelistLock);
8595   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
8596          "CMS thread should hold CMS token");
8597   _bitMap->lock()->unlock();
8598   _freelistLock->unlock();
8599   ConcurrentMarkSweepThread::desynchronize(true);
8600   ConcurrentMarkSweepThread::acknowledge_yield_request();
8601   _collector->stopTimer();
8602   GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
8603   if (PrintCMSStatistics != 0) {
8604     _collector->incrementYields();
8605   }
8606   _collector->icms_wait();
8607 
8608   // See the comment in coordinator_yield()
8609   for (unsigned i = 0; i < CMSYieldSleepCount &&
8610                        ConcurrentMarkSweepThread::should_yield() &&
8611                        !CMSCollector::foregroundGCIsActive(); ++i) {
8612     os::sleep(Thread::current(), 1, false);
8613     ConcurrentMarkSweepThread::acknowledge_yield_request();
8614   }
8615 
8616   ConcurrentMarkSweepThread::synchronize(true);
8617   _freelistLock->lock();
8618   _bitMap->lock()->lock_without_safepoint_check();
8619   _collector->startTimer();
8620 }
8621 
8622 #ifndef PRODUCT
8623 // This is actually very useful in a product build if it can
8624 // be called from the debugger.  Compile it into the product
8625 // as needed.
8626 bool debug_verify_chunk_in_free_list(FreeChunk* fc) {
8627   return debug_cms_space->verify_chunk_in_free_list(fc);
8628 }
8629 #endif
8630 
8631 void SweepClosure::print_free_block_coalesced(FreeChunk* fc) const {
8632   if (CMSTraceSweeper) {
8633     gclog_or_tty->print_cr("Sweep:coal_free_blk " PTR_FORMAT " (" SIZE_FORMAT ")",
8634                            fc, fc->size());
8635   }
8636 }
8637 
8638 // CMSIsAliveClosure
8639 bool CMSIsAliveClosure::do_object_b(oop obj) {
8640   HeapWord* addr = (HeapWord*)obj;
8641   return addr != NULL &&
8642          (!_span.contains(addr) || _bit_map->isMarked(addr));
8643 }
8644 
8645 CMSKeepAliveClosure::CMSKeepAliveClosure( CMSCollector* collector,
8646                       MemRegion span,
8647                       CMSBitMap* bit_map, CMSMarkStack* mark_stack,
8648                       CMSMarkStack* revisit_stack, bool cpc):
8649   KlassRememberingOopClosure(collector, NULL, revisit_stack),
8650   _span(span),
8651   _bit_map(bit_map),
8652   _mark_stack(mark_stack),
8653   _concurrent_precleaning(cpc) {
8654   assert(!_span.is_empty(), "Empty span could spell trouble");
8655 }
8656 
8657 
8658 // CMSKeepAliveClosure: the serial version
8659 void CMSKeepAliveClosure::do_oop(oop obj) {
8660   HeapWord* addr = (HeapWord*)obj;
8661   if (_span.contains(addr) &&
8662       !_bit_map->isMarked(addr)) {
8663     _bit_map->mark(addr);
8664     bool simulate_overflow = false;
8665     NOT_PRODUCT(
8666       if (CMSMarkStackOverflowALot &&
8667           _collector->simulate_overflow()) {
8668         // simulate a stack overflow
8669         simulate_overflow = true;
8670       }
8671     )
8672     if (simulate_overflow || !_mark_stack->push(obj)) {
8673       if (_concurrent_precleaning) {
8674         // We dirty the overflown object and let the remark
8675         // phase deal with it.
8676         assert(_collector->overflow_list_is_empty(), "Error");
8677         // In the case of object arrays, we need to dirty all of
8678         // the cards that the object spans. No locking or atomics
8679         // are needed since no one else can be mutating the mod union
8680         // table.
8681         if (obj->is_objArray()) {
8682           size_t sz = obj->size();
8683           HeapWord* end_card_addr =
8684             (HeapWord*)round_to((intptr_t)(addr+sz), CardTableModRefBS::card_size);
8685           MemRegion redirty_range = MemRegion(addr, end_card_addr);
8686           assert(!redirty_range.is_empty(), "Arithmetical tautology");
8687           _collector->_modUnionTable.mark_range(redirty_range);
8688         } else {
8689           _collector->_modUnionTable.mark(addr);
8690         }
8691         _collector->_ser_kac_preclean_ovflw++;
8692       } else {
8693         _collector->push_on_overflow_list(obj);
8694         _collector->_ser_kac_ovflw++;
8695       }
8696     }
8697   }
8698 }
8699 
8700 void CMSKeepAliveClosure::do_oop(oop* p)       { CMSKeepAliveClosure::do_oop_work(p); }
8701 void CMSKeepAliveClosure::do_oop(narrowOop* p) { CMSKeepAliveClosure::do_oop_work(p); }
8702 
8703 // CMSParKeepAliveClosure: a parallel version of the above.
8704 // The work queues are private to each closure (thread),
8705 // but (may be) available for stealing by other threads.
8706 void CMSParKeepAliveClosure::do_oop(oop obj) {
8707   HeapWord* addr = (HeapWord*)obj;
8708   if (_span.contains(addr) &&
8709       !_bit_map->isMarked(addr)) {
8710     // In general, during recursive tracing, several threads
8711     // may be concurrently getting here; the first one to
8712     // "tag" it, claims it.
8713     if (_bit_map->par_mark(addr)) {
8714       bool res = _work_queue->push(obj);
8715       assert(res, "Low water mark should be much less than capacity");
8716       // Do a recursive trim in the hope that this will keep
8717       // stack usage lower, but leave some oops for potential stealers
8718       trim_queue(_low_water_mark);
8719     } // Else, another thread got there first
8720   }
8721 }
8722 
8723 void CMSParKeepAliveClosure::do_oop(oop* p)       { CMSParKeepAliveClosure::do_oop_work(p); }
8724 void CMSParKeepAliveClosure::do_oop(narrowOop* p) { CMSParKeepAliveClosure::do_oop_work(p); }
8725 
8726 void CMSParKeepAliveClosure::trim_queue(uint max) {
8727   while (_work_queue->size() > max) {
8728     oop new_oop;
8729     if (_work_queue->pop_local(new_oop)) {
8730       assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
8731       assert(_bit_map->isMarked((HeapWord*)new_oop),
8732              "no white objects on this stack!");
8733       assert(_span.contains((HeapWord*)new_oop), "Out of bounds oop");
8734       // iterate over the oops in this oop, marking and pushing
8735       // the ones in CMS heap (i.e. in _span).
8736       new_oop->oop_iterate(&_mark_and_push);
8737     }
8738   }
8739 }
8740 
8741 CMSInnerParMarkAndPushClosure::CMSInnerParMarkAndPushClosure(
8742                                 CMSCollector* collector,
8743                                 MemRegion span, CMSBitMap* bit_map,
8744                                 CMSMarkStack* revisit_stack,
8745                                 OopTaskQueue* work_queue):
8746   Par_KlassRememberingOopClosure(collector, NULL, revisit_stack),
8747   _span(span),
8748   _bit_map(bit_map),
8749   _work_queue(work_queue) { }
8750 
8751 void CMSInnerParMarkAndPushClosure::do_oop(oop obj) {
8752   HeapWord* addr = (HeapWord*)obj;
8753   if (_span.contains(addr) &&
8754       !_bit_map->isMarked(addr)) {
8755     if (_bit_map->par_mark(addr)) {
8756       bool simulate_overflow = false;
8757       NOT_PRODUCT(
8758         if (CMSMarkStackOverflowALot &&
8759             _collector->par_simulate_overflow()) {
8760           // simulate a stack overflow
8761           simulate_overflow = true;
8762         }
8763       )
8764       if (simulate_overflow || !_work_queue->push(obj)) {
8765         _collector->par_push_on_overflow_list(obj);
8766         _collector->_par_kac_ovflw++;
8767       }
8768     } // Else another thread got there already
8769   }
8770 }
8771 
8772 void CMSInnerParMarkAndPushClosure::do_oop(oop* p)       { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
8773 void CMSInnerParMarkAndPushClosure::do_oop(narrowOop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
8774 
8775 //////////////////////////////////////////////////////////////////
8776 //  CMSExpansionCause                /////////////////////////////
8777 //////////////////////////////////////////////////////////////////
8778 const char* CMSExpansionCause::to_string(CMSExpansionCause::Cause cause) {
8779   switch (cause) {
8780     case _no_expansion:
8781       return "No expansion";
8782     case _satisfy_free_ratio:
8783       return "Free ratio";
8784     case _satisfy_promotion:
8785       return "Satisfy promotion";
8786     case _satisfy_allocation:
8787       return "Allocation";
8788     case _allocate_par_lab:
8789       return "Par LAB";
8790     case _allocate_par_spooling_space:
8791       return "Par Spooling Space";
8792     case _adaptive_size_policy:
8793       return "Ergonomics";
8794     default:
8795       return "unknown";
8796   }
8797 }
8798 
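// Drain the (serial) marking stack, refilling it from the overflow list
// whenever it runs dry; each object popped is traced with the keep-alive
// closure.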
8799 void CMSDrainMarkingStackClosure::do_void() {
8800   // the max number to take from overflow list at a time
8801   const size_t num = _mark_stack->capacity()/4;
8802   assert(!_concurrent_precleaning || _collector->overflow_list_is_empty(),
8803          "Overflow list should be NULL during concurrent phases");
8804   while (!_mark_stack->isEmpty() ||
8805          // if stack is empty, check the overflow list
8806          _collector->take_from_overflow_list(num, _mark_stack)) {
8807     oop obj = _mark_stack->pop();
8808     HeapWord* addr = (HeapWord*)obj;
8809     assert(_span.contains(addr), "Should be within span");
8810     assert(_bit_map->isMarked(addr), "Should be marked");
8811     assert(obj->is_oop(), "Should be an oop");
8812     obj->oop_iterate(_keep_alive);
8813   }
8814 }
8815 
8816 void CMSParDrainMarkingStackClosure::do_void() {
8817   // drain queue
8818   trim_queue(0);
8819 }
8820 
8821 // Trim our work_queue so its length is below max at return
8822 void CMSParDrainMarkingStackClosure::trim_queue(uint max) {
8823   while (_work_queue->size() > max) {
8824     oop new_oop;
8825     if (_work_queue->pop_local(new_oop)) {
8826       assert(new_oop->is_oop(), "Expected an oop");
8827       assert(_bit_map->isMarked((HeapWord*)new_oop),
8828              "no white objects on this stack!");
8829       assert(_span.contains((HeapWord*)new_oop), "Out of bounds oop");
8830       // iterate over the oops in this oop, marking and pushing
8831       // the ones in CMS heap (i.e. in _span).
8832       new_oop->oop_iterate(&_mark_and_push);
8833     }
8834   }
8835 }
8836 
8837 ////////////////////////////////////////////////////////////////////
8838 // Support for Marking Stack Overflow list handling and related code
8839 ////////////////////////////////////////////////////////////////////
8840 // Much of the following code is similar in shape and spirit to the
8841 // code used in ParNewGC. We should try to share that code
8842 // as much as possible in the future.
8843 
8844 #ifndef PRODUCT
8845 // Debugging support for CMSStackOverflowALot
8846 
8847 // It's OK to call this multi-threaded;  the worst thing
8848 // that can happen is that we'll get a bunch of closely
8849 // spaced simulated overflows, but that's OK; in fact it's
8850 // probably good, as it would exercise the overflow code
8851 // under contention.
8852 bool CMSCollector::simulate_overflow() {
8853   if (_overflow_counter-- <= 0) { // just being defensive
8854     _overflow_counter = CMSMarkStackOverflowInterval;
8855     return true;
8856   } else {
8857     return false;
8858   }
8859 }
8860 
8861 bool CMSCollector::par_simulate_overflow() {
8862   return simulate_overflow();
8863 }
8864 #endif
8865 
8866 // Single-threaded
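// Transfer up to "num" objects from the overflow list to the given mark
// stack, restoring the prototype mark word of each object taken (the
// list is threaded through the mark words). Returns true if the stack
// is non-empty on return.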
8867 bool CMSCollector::take_from_overflow_list(size_t num, CMSMarkStack* stack) {
8868   assert(stack->isEmpty(), "Expected precondition");
8869   assert(stack->capacity() > num, "Shouldn't bite more than we can chew");
8870   size_t i = num;
8871   oop  cur = _overflow_list;
8872   const markOop proto = markOopDesc::prototype();
8873   NOT_PRODUCT(ssize_t n = 0;)
8874   for (oop next; i > 0 && cur != NULL; cur = next, i--) {
8875     next = oop(cur->mark());
8876     cur->set_mark(proto);   // until proven otherwise
8877     assert(cur->is_oop(), "Should be an oop");
8878     bool res = stack->push(cur);
8879     assert(res, "Bit off more than we can chew?");
8880     NOT_PRODUCT(n++;)
8881   }
8882   _overflow_list = cur;
8883 #ifndef PRODUCT
8884   assert(_num_par_pushes >= n, "Too many pops?");
8885   _num_par_pushes -= n;
8886 #endif
8887   return !stack->isEmpty();
8888 }
8889 
8890 #define BUSY  (oop(0x1aff1aff))
8891 // (MT-safe) Get a prefix of at most "num" from the list.
8892 // The overflow list is chained through the mark word of
8893 // each object in the list. We fetch the entire list,
8894 // break off a prefix of the right size and return the
8895 // remainder. If other threads try to take objects from
8896 // the overflow list at that time, they will wait for
8897 // some time to see if data becomes available. If (and
8898 // only if) another thread places one or more object(s)
8899 // on the global list before we have returned the suffix
8900 // to the global list, we will walk down our local list
8901 // to find its end and append the global list to
8902 // our suffix before returning it. This suffix walk can
8903 // prove to be expensive (quadratic in the amount of traffic)
8904 // when there are many objects in the overflow list and
8905 // there is much producer-consumer contention on the list.
8906 // *NOTE*: The overflow list manipulation code here and
8907 // in ParNewGeneration:: are very similar in shape,
8908 // except that in the ParNew case we use the old (from/eden)
8909 // copy of the object to thread the list via its klass word.
8910 // Because of the common code, if you make any changes in
8911 // the code below, please check the ParNew version to see if
8912 // similar changes might be needed.
8913 // CR 6797058 has been filed to consolidate the common code.
8914 bool CMSCollector::par_take_from_overflow_list(size_t num,
8915                                                OopTaskQueue* work_q,
8916                                                int no_of_gc_threads) {
8917   assert(work_q->size() == 0, "First empty local work queue");
8918   assert(num < work_q->max_elems(), "Can't bite more than we can chew");
8919   if (_overflow_list == NULL) {
8920     return false;
8921   }
8922   // Grab the entire list; we'll put back a suffix
8923   oop prefix = (oop)Atomic::xchg_ptr(BUSY, &_overflow_list);
8924   Thread* tid = Thread::current();
8925   // Before "no_of_gc_threads" was introduced CMSOverflowSpinCount was
8926   // set to ParallelGCThreads.
8927   size_t CMSOverflowSpinCount = (size_t) no_of_gc_threads; // was ParallelGCThreads;
8928   size_t sleep_time_millis = MAX2((size_t)1, num/100);
8929   // If the list is busy, we spin for a short while,
8930   // sleeping between attempts to get the list.
8931   for (size_t spin = 0; prefix == BUSY && spin < CMSOverflowSpinCount; spin++) {
8932     os::sleep(tid, sleep_time_millis, false);
8933     if (_overflow_list == NULL) {
8934       // Nothing left to take
8935       return false;
8936     } else if (_overflow_list != BUSY) {
8937       // Try and grab the prefix
8938       prefix = (oop)Atomic::xchg_ptr(BUSY, &_overflow_list);
8939     }
8940   }
8941   // If the list was found to be empty, or we spun long
8942   // enough, we give up and return empty-handed. If we leave
8943   // the list in the BUSY state below, it must be the case that
8944   // some other thread holds the overflow list and will set it
8945   // to a non-BUSY state in the future.
8946   if (prefix == NULL || prefix == BUSY) {
8947      // Nothing to take or waited long enough
8948      if (prefix == NULL) {
8949        // Write back the NULL in case we overwrote it with BUSY above
8950        // and it is still the same value.
8951        (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
8952      }
8953      return false;
8954   }
8955   assert(prefix != NULL && prefix != BUSY, "Error");
8956   size_t i = num;
8957   oop cur = prefix;
8958   // Walk down the first "num" objects, unless we reach the end.
8959   for (; i > 1 && cur->mark() != NULL; cur = oop(cur->mark()), i--);
8960   if (cur->mark() == NULL) {
8961     // We have "num" or fewer elements in the list, so there
8962     // is nothing to return to the global list.
8963     // Write back the NULL in lieu of the BUSY we wrote
8964     // above, if it is still the same value.
8965     if (_overflow_list == BUSY) {
8966       (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
8967     }
8968   } else {
8969     // Chop off the suffix and return it to the global list.
8970     assert(cur->mark() != BUSY, "Error");
8971     oop suffix_head = cur->mark(); // suffix will be put back on global list
8972     cur->set_mark(NULL);           // break off suffix
8973     // It's possible that the list is still in the empty (BUSY) state
8974     // we left it in a short while ago; in that case we may be
8975     // able to place back the suffix without incurring the cost
8976     // of a walk down the list.
8977     oop observed_overflow_list = _overflow_list;
8978     oop cur_overflow_list = observed_overflow_list;
8979     bool attached = false;
8980     while (observed_overflow_list == BUSY || observed_overflow_list == NULL) {
8981       observed_overflow_list =
8982         (oop) Atomic::cmpxchg_ptr(suffix_head, &_overflow_list, cur_overflow_list);
8983       if (cur_overflow_list == observed_overflow_list) {
8984         attached = true;
8985         break;
8986       } else cur_overflow_list = observed_overflow_list;
8987     }
8988     if (!attached) {
8989       // Too bad, someone else sneaked in (at least) an element; we'll need
8990       // to do a splice. Find tail of suffix so we can prepend suffix to global
8991       // list.
8992       for (cur = suffix_head; cur->mark() != NULL; cur = (oop)(cur->mark()));
8993       oop suffix_tail = cur;
8994       assert(suffix_tail != NULL && suffix_tail->mark() == NULL,
8995              "Tautology");
8996       observed_overflow_list = _overflow_list;
8997       do {
8998         cur_overflow_list = observed_overflow_list;
8999         if (cur_overflow_list != BUSY) {
9000           // Do the splice ...
9001           suffix_tail->set_mark(markOop(cur_overflow_list));
9002         } else { // cur_overflow_list == BUSY
9003           suffix_tail->set_mark(NULL);
9004         }
9005         // ... and try to place spliced list back on overflow_list ...
9006         observed_overflow_list =
9007           (oop) Atomic::cmpxchg_ptr(suffix_head, &_overflow_list, cur_overflow_list);
9008       } while (cur_overflow_list != observed_overflow_list);
9009       // ... until we have succeeded in doing so.
9010     }
9011   }
9012 
9013   // Push the prefix elements on work_q
9014   assert(prefix != NULL, "control point invariant");
9015   const markOop proto = markOopDesc::prototype();
9016   oop next;
9017   NOT_PRODUCT(ssize_t n = 0;)
9018   for (cur = prefix; cur != NULL; cur = next) {
9019     next = oop(cur->mark());
9020     cur->set_mark(proto);   // until proven otherwise
9021     assert(cur->is_oop(), "Should be an oop");
9022     bool res = work_q->push(cur);
9023     assert(res, "Bit off more than we can chew?");
9024     NOT_PRODUCT(n++;)
9025   }
9026 #ifndef PRODUCT
9027   assert(_num_par_pushes >= n, "Too many pops?");
9028   Atomic::add_ptr(-(intptr_t)n, &_num_par_pushes);
9029 #endif
9030   return true;
9031 }
9032 
9033 // Single-threaded
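// Prepend p to the overflow list, threading the list through p's mark
// word; the original mark is preserved first if it would otherwise
// be lost.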
9034 void CMSCollector::push_on_overflow_list(oop p) {
9035   NOT_PRODUCT(_num_par_pushes++;)
9036   assert(p->is_oop(), "Not an oop");
9037   preserve_mark_if_necessary(p);
9038   p->set_mark((markOop)_overflow_list);
9039   _overflow_list = p;
9040 }
9041 
9042 // Multi-threaded; use CAS to prepend to overflow list
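// If the list is observed in the BUSY state (temporarily claimed by a
// taker in par_take_from_overflow_list()), p is installed as a
// NULL-terminated single-element list in its place.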
9043 void CMSCollector::par_push_on_overflow_list(oop p) {
9044   NOT_PRODUCT(Atomic::inc_ptr(&_num_par_pushes);)
9045   assert(p->is_oop(), "Not an oop");
9046   par_preserve_mark_if_necessary(p);
9047   oop observed_overflow_list = _overflow_list;
9048   oop cur_overflow_list;
9049   do {
9050     cur_overflow_list = observed_overflow_list;
9051     if (cur_overflow_list != BUSY) {
9052       p->set_mark(markOop(cur_overflow_list));
9053     } else {
9054       p->set_mark(NULL);
9055     }
9056     observed_overflow_list =
9057       (oop) Atomic::cmpxchg_ptr(p, &_overflow_list, cur_overflow_list);
9058   } while (cur_overflow_list != observed_overflow_list);
9059 }
9060 #undef BUSY
9061 
9062 // Single threaded
9063 // General Note on GrowableArray: pushes may silently fail
9064 // because we are (temporarily) out of C-heap for expanding
9065 // the stack. The problem is quite ubiquitous and affects
9066 // a lot of code in the JVM. The prudent thing for GrowableArray
9067 // to do (for now) is to exit with an error. However, that may
9068 // be too draconian in some cases because the caller may be
9069 // able to recover without much harm. For such cases, we
9070 // should probably introduce a "soft_push" method which returns
9071 // an indication of success or failure with the assumption that
9072 // the caller may be able to recover from a failure; code in
9073 // the VM can then be changed, incrementally, to deal with such
9074 // failures where possible, thus incrementally hardening the VM
9075 // in such low-resource situations.
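//
// A minimal sketch of what such a "soft_push" might look like (an
// assumption, not current GrowableArray API; "try_grow" is a similarly
// hypothetical non-fatal variant of the existing growth path):
//
//   template <typename E>
//   bool GrowableArray<E>::soft_push(const E& elem) {
//     if (_len == _max && !try_grow()) {
//       return false;      // out of C-heap; let the caller recover
//     }
//     _data[_len++] = elem;
//     return true;
//   }
//
// Callers that cannot recover would keep using the existing fatal push.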
9076 void CMSCollector::preserve_mark_work(oop p, markOop m) {
9077   _preserved_oop_stack.push(p);
9078   _preserved_mark_stack.push(m);
9079   assert(m == p->mark(), "Mark word changed");
9080   assert(_preserved_oop_stack.size() == _preserved_mark_stack.size(),
9081          "bijection");
9082 }
9083 
9084 // Single threaded
9085 void CMSCollector::preserve_mark_if_necessary(oop p) {
9086   markOop m = p->mark();
9087   if (m->must_be_preserved(p)) {
9088     preserve_mark_work(p, m);
9089   }
9090 }
9091 
9092 void CMSCollector::par_preserve_mark_if_necessary(oop p) {
9093   markOop m = p->mark();
9094   if (m->must_be_preserved(p)) {
9095     MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
9096     // Even though we read the mark word without holding
9097     // the lock, we are assured that it will not change
9098     // because we "own" this oop, so no other thread can
9099     // be trying to push it on the overflow list; see
9100     // the assertion in preserve_mark_work() that checks
9101     // that m == p->mark().
9102     preserve_mark_work(p, m);
9103   }
9104 }
9105 
9106 // We should be able to do this multi-threaded,
9107 // with a chunk of the stack being a task (this is
9108 // correct because each oop only ever appears
9109 // once in the overflow list). However, it's
9110 // not very easy to completely overlap this with
9111 // other operations, so it will generally not be done
9112 // until all work has been completed. Because we
9113 // expect the preserved oop stack (set) to be small,
9114 // it's probably fine to do this single-threaded.
9115 // We can explore cleverer concurrent/overlapped/parallel
9116 // processing of preserved marks if we feel the
9117 // need for this in the future. Stack overflow should
9118 // be so rare in practice and, when it happens, its
9119 // effect on performance so great that this will
9120 // likely just be in the noise anyway.
9121 void CMSCollector::restore_preserved_marks_if_any() {
9122   assert(SafepointSynchronize::is_at_safepoint(),
9123          "world should be stopped");
9124   assert(Thread::current()->is_ConcurrentGC_thread() ||
9125          Thread::current()->is_VM_thread(),
9126          "should be single-threaded");
9127   assert(_preserved_oop_stack.size() == _preserved_mark_stack.size(),
9128          "bijection");
9129 
9130   while (!_preserved_oop_stack.is_empty()) {
9131     oop p = _preserved_oop_stack.pop();
9132     assert(p->is_oop(), "Should be an oop");
9133     assert(_span.contains(p), "oop should be in _span");
9134     assert(p->mark() == markOopDesc::prototype(),
9135            "Set when taken from overflow list");
9136     markOop m = _preserved_mark_stack.pop();
9137     p->set_mark(m);
9138   }
9139   assert(_preserved_mark_stack.is_empty() && _preserved_oop_stack.is_empty(),
9140          "stacks were cleared above");
9141 }
9142 
9143 #ifndef PRODUCT
9144 bool CMSCollector::no_preserved_marks() const {
9145   return _preserved_mark_stack.is_empty() && _preserved_oop_stack.is_empty();
9146 }
9147 #endif
9148 
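// ASConcurrentMarkSweepGeneration: variant of the CMS generation used
// when UseAdaptiveSizePolicy is in effect. The helpers below fetch the
// adaptive size policy and its performance counters from the heap's
// policy objects.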
9149 CMSAdaptiveSizePolicy* ASConcurrentMarkSweepGeneration::cms_size_policy() const {
9151   GenCollectedHeap* gch = (GenCollectedHeap*) GenCollectedHeap::heap();
9152   CMSAdaptiveSizePolicy* size_policy =
9153     (CMSAdaptiveSizePolicy*) gch->gen_policy()->size_policy();
9154   assert(size_policy->is_gc_cms_adaptive_size_policy(),
9155     "Wrong type for size policy");
9156   return size_policy;
9157 }
9158 
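// Resize the generation toward the promotion size chosen by the size
// policy: expand when the current promo space is smaller than desired,
// shrink when it is larger.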
9159 void ASConcurrentMarkSweepGeneration::resize(size_t cur_promo_size,
9160                                              size_t desired_promo_size) {
9161   if (cur_promo_size < desired_promo_size) {
9162     size_t expand_bytes = desired_promo_size - cur_promo_size;
9163     if (PrintAdaptiveSizePolicy && Verbose) {
9164       gclog_or_tty->print_cr(" ASConcurrentMarkSweepGeneration::resize "
9165         "Expanding tenured generation by " SIZE_FORMAT " (bytes)",
9166         expand_bytes);
9167     }
9168     expand(expand_bytes,
9169            MinHeapDeltaBytes,
9170            CMSExpansionCause::_adaptive_size_policy);
9171   } else if (desired_promo_size < cur_promo_size) {
9172     size_t shrink_bytes = cur_promo_size - desired_promo_size;
9173     if (PrintAdaptiveSizePolicy && Verbose) {
9174       gclog_or_tty->print_cr(" ASConcurrentMarkSweepGeneration::resize "
9175         "Shrinking tenured generation by " SIZE_FORMAT " (bytes)",
9176         shrink_bytes);
9177     }
9178     shrink(shrink_bytes);
9179   }
9180 }
9181 
9182 CMSGCAdaptivePolicyCounters* ASConcurrentMarkSweepGeneration::gc_adaptive_policy_counters() {
9183   GenCollectedHeap* gch = GenCollectedHeap::heap();
9184   CMSGCAdaptivePolicyCounters* counters =
9185     (CMSGCAdaptivePolicyCounters*) gch->collector_policy()->counters();
9186   assert(counters->kind() == GCPolicyCounters::CMSGCAdaptivePolicyCountersKind,
9187     "Wrong kind of counters");
9188   return counters;
9189 }
9190 
9191 
9192 void ASConcurrentMarkSweepGeneration::update_counters() {
9193   if (UsePerfData) {
9194     _space_counters->update_all();
9195     _gen_counters->update_all();
9196     CMSGCAdaptivePolicyCounters* counters = gc_adaptive_policy_counters();
9197     GenCollectedHeap* gch = GenCollectedHeap::heap();
9198     CMSGCStats* gc_stats_l = (CMSGCStats*) gc_stats();
9199     assert(gc_stats_l->kind() == GCStats::CMSGCStatsKind,
9200       "Wrong gc statistics type");
9201     counters->update_counters(gc_stats_l);
9202   }
9203 }
9204 
9205 void ASConcurrentMarkSweepGeneration::update_counters(size_t used) {
9206   if (UsePerfData) {
9207     _space_counters->update_used(used);
9208     _space_counters->update_capacity();
9209     _gen_counters->update_all();
9210 
9211     CMSGCAdaptivePolicyCounters* counters = gc_adaptive_policy_counters();
9212     GenCollectedHeap* gch = GenCollectedHeap::heap();
9213     CMSGCStats* gc_stats_l = (CMSGCStats*) gc_stats();
9214     assert(gc_stats_l->kind() == GCStats::CMSGCStatsKind,
9215       "Wrong gc statistics type");
9216     counters->update_counters(gc_stats_l);
9217   }
9218 }
9219 
9220 // The desired expansion delta is computed so that at least the
9221 // desired free percentage of the generation is kept free.
9222 void ASConcurrentMarkSweepGeneration::compute_new_size() {
9223   assert_locked_or_safepoint(Heap_lock);
9224 
9225   GenCollectedHeap* gch = (GenCollectedHeap*) GenCollectedHeap::heap();
9226 
9227   // If incremental collection failed, we just want to expand
9228   // to the limit.
9229   if (incremental_collection_failed()) {
9230     clear_incremental_collection_failed();
9231     grow_to_reserved();
9232     return;
9233   }
9234 
9235   assert(UseAdaptiveSizePolicy, "Should be using adaptive sizing");
9236 
9237   assert(gch->kind() == CollectedHeap::GenCollectedHeap,
9238     "Wrong type of heap");
9239   int prev_level = level() - 1;
9240   assert(prev_level >= 0, "The cms generation should not be the lowest generation");
9241   Generation* prev_gen = gch->get_gen(prev_level);
9242   assert(prev_gen->kind() == Generation::ASParNew,
9243     "Wrong type of young generation");
9244   ParNewGeneration* younger_gen = (ParNewGeneration*) prev_gen;
9245   size_t cur_eden = younger_gen->eden()->capacity();
9246   CMSAdaptiveSizePolicy* size_policy = cms_size_policy();
9247   size_t cur_promo = free();
9248   size_policy->compute_tenured_generation_free_space(cur_promo,
9249                                                      max_available(),
9250                                                      cur_eden);
9251   resize(cur_promo, size_policy->promo_size());
9252 
9253   // Record the new size of the space in the cms generation
9254   // that is available for promotions.  This is temporary.
9255   // It should be the desired promo size.
9256   size_policy->avg_cms_promo()->sample(free());
9257   size_policy->avg_old_live()->sample(used());
9258 
9259   if (UsePerfData) {
9260     CMSGCAdaptivePolicyCounters* counters = gc_adaptive_policy_counters();
9261     counters->update_cms_capacity_counter(capacity());
9262   }
9263 }
9264 
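// Attempt to shrink the generation by up to "desired_bytes", limited by
// the (page-aligned) size of the free chunk at the end of the space. If
// the underlying virtual space actually shrinks, the end chunk, the block
// offset table, the card table, and the space's end are all adjusted to
// match the new boundary.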
9265 void ASConcurrentMarkSweepGeneration::shrink_by(size_t desired_bytes) {
9266   assert_locked_or_safepoint(Heap_lock);
9267   assert_lock_strong(freelistLock());
9268   HeapWord* old_end = _cmsSpace->end();
9269   HeapWord* unallocated_start = _cmsSpace->unallocated_block();
9270   assert(old_end >= unallocated_start, "Miscalculation of unallocated_start");
9271   FreeChunk* chunk_at_end = find_chunk_at_end();
9272   if (chunk_at_end == NULL) {
9273     // No room to shrink
9274     if (PrintGCDetails && Verbose) {
9275       gclog_or_tty->print_cr("No room to shrink: old_end  "
9276         PTR_FORMAT "  unallocated_start  " PTR_FORMAT
9277         " chunk_at_end  " PTR_FORMAT,
9278         old_end, unallocated_start, chunk_at_end);
9279     }
9280     return;
9281   } else {
9282 
9283     // Find the chunk at the end of the space and determine
9284     // how much it can be shrunk.
9285     size_t shrinkable_size_in_bytes = chunk_at_end->size();
9286     size_t aligned_shrinkable_size_in_bytes =
9287       align_size_down(shrinkable_size_in_bytes, os::vm_page_size());
9288     assert(unallocated_start <= chunk_at_end->end(),
9289       "Inconsistent chunk at end of space");
9290     size_t bytes = MIN2(desired_bytes, aligned_shrinkable_size_in_bytes);
9291     size_t word_size_before = heap_word_size(_virtual_space.committed_size());
9292 
9293     // Shrink the underlying space
9294     _virtual_space.shrink_by(bytes);
9295     if (PrintGCDetails && Verbose) {
9296       gclog_or_tty->print_cr("ConcurrentMarkSweepGeneration::shrink_by:"
9297         " desired_bytes " SIZE_FORMAT
9298         " shrinkable_size_in_bytes " SIZE_FORMAT
9299         " aligned_shrinkable_size_in_bytes " SIZE_FORMAT
9300         "  bytes  " SIZE_FORMAT,
9301         desired_bytes, shrinkable_size_in_bytes,
9302         aligned_shrinkable_size_in_bytes, bytes);
9303       gclog_or_tty->print_cr("          old_end  " PTR_FORMAT
9304         "  unallocated_start  " PTR_FORMAT,
9305         old_end, unallocated_start);
9306     }
9307 
9308     // If the space did shrink (shrinking is not guaranteed),
9309     // shrink the chunk at the end by the appropriate amount.
9310     if (((HeapWord*)_virtual_space.high()) < old_end) {
9311       size_t new_word_size =
9312         heap_word_size(_virtual_space.committed_size());
9313 
9314       // Have to remove the chunk from the dictionary because it is changing
9315       // size and might belong somewhere else in the dictionary.
9316 
9317       // Get the chunk at end, shrink it, and put it
9318       // back.
9319       _cmsSpace->removeChunkFromDictionary(chunk_at_end);
9320       size_t word_size_change = word_size_before - new_word_size;
9321       size_t chunk_at_end_old_size = chunk_at_end->size();
9322       assert(chunk_at_end_old_size >= word_size_change,
9323         "Shrink is too large");
9324       chunk_at_end->set_size(chunk_at_end_old_size -
9325                              word_size_change);
9326       _cmsSpace->freed((HeapWord*) chunk_at_end->end(),
9327         word_size_change);
9328 
9329       _cmsSpace->returnChunkToDictionary(chunk_at_end);
9330 
9331       MemRegion mr(_cmsSpace->bottom(), new_word_size);
9332       _bts->resize(new_word_size);  // resize the block offset shared array
9333       Universe::heap()->barrier_set()->resize_covered_region(mr);
9334       _cmsSpace->assert_locked();
9335       _cmsSpace->set_end((HeapWord*)_virtual_space.high());
9336 
9337       NOT_PRODUCT(_cmsSpace->dictionary()->verify());
9338 
9339       // update the space and generation capacity counters
9340       if (UsePerfData) {
9341         _space_counters->update_capacity();
9342         _gen_counters->update_all();
9343       }
9344 
9345       if (Verbose && PrintGCDetails) {
9346         size_t new_mem_size = _virtual_space.committed_size();
9347         size_t old_mem_size = new_mem_size + bytes;
9348         gclog_or_tty->print_cr("Shrinking %s from " SIZE_FORMAT "K by " SIZE_FORMAT "K to " SIZE_FORMAT "K",
9349                       name(), old_mem_size/K, bytes/K, new_mem_size/K);
9350       }
9351     }
9352 
9353     assert(_cmsSpace->unallocated_block() <= _cmsSpace->end(),
9354       "Inconsistency at end of space");
9355     assert(chunk_at_end->end() == _cmsSpace->end(),
9356       "Shrinking is inconsistent");
9357     return;
9358   }
9359 }
9360 
9361 // Transfer some objects from the overflow list to the regular
9362 // marking stack. Return true if any objects were transferred.
9363 bool MarkRefsIntoAndScanClosure::take_from_overflow_list() {
9364   size_t num = MIN2((size_t)(_mark_stack->capacity() - _mark_stack->length())/4,
9365                     (size_t)ParGCDesiredObjsFromOverflowList);
9366 
9367   bool res = _collector->take_from_overflow_list(num, _mark_stack);
9368   assert(_collector->overflow_list_is_empty() || res,
9369          "If list is not empty, we should have taken something");
9370   assert(!res || !_mark_stack->isEmpty(),
9371          "If we took something, it should now be on our stack");
9372   return res;
9373 }
9374 
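// Examine the block at "addr": if it is an object that is not marked
// live, record it in the dead bit map; a live-marked object must not
// already appear there ("no resurrection"). Returns the block size, or
// 0 if the size could not be computed without stalling.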
9375 size_t MarkDeadObjectsClosure::do_blk(HeapWord* addr) {
9376   size_t res = _sp->block_size_no_stall(addr, _collector);
9377   if (_sp->block_is_obj(addr)) {
9378     if (_live_bit_map->isMarked(addr)) {
9379       // It can't have been dead in a previous cycle
9380       guarantee(!_dead_bit_map->isMarked(addr), "No resurrection!");
9381     } else {
9382       _dead_bit_map->mark(addr);      // mark the dead object
9383     }
9384   }
9385   // Could be 0, if the block size could not be computed without stalling.
9386   return res;
9387 }
9388 
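// Record memory manager statistics for the stop-the-world CMS phases:
// initial marking records the GC begin time, pre-GC usage and accumulated
// GC time; final marking only the accumulated GC time; sweeping the peak
// and post-GC usage, the GC end time, and the collection count.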
9389 TraceCMSMemoryManagerStats::TraceCMSMemoryManagerStats(CMSCollector::CollectorState phase,
9390                                                        GCCause::Cause cause): TraceMemoryManagerStats() {
9391   switch (phase) {
9392     case CMSCollector::InitialMarking:
9393       initialize(true  /* fullGC */ ,
9394                  cause /* cause of the GC */,
9395                  true  /* recordGCBeginTime */,
9396                  true  /* recordPreGCUsage */,
9397                  false /* recordPeakUsage */,
9398                  false /* recordPostGCusage */,
9399                  true  /* recordAccumulatedGCTime */,
9400                  false /* recordGCEndTime */,
9401                  false /* countCollection */  );
9402       break;
9403 
9404     case CMSCollector::FinalMarking:
9405       initialize(true  /* fullGC */ ,
9406                  cause /* cause of the GC */,
9407                  false /* recordGCBeginTime */,
9408                  false /* recordPreGCUsage */,
9409                  false /* recordPeakUsage */,
9410                  false /* recordPostGCusage */,
9411                  true  /* recordAccumulatedGCTime */,
9412                  false /* recordGCEndTime */,
9413                  false /* countCollection */  );
9414       break;
9415 
9416     case CMSCollector::Sweeping:
9417       initialize(true  /* fullGC */ ,
9418                  cause /* cause of the GC */,
9419                  false /* recordGCBeginTime */,
9420                  false /* recordPreGCUsage */,
9421                  true  /* recordPeakUsage */,
9422                  true  /* recordPostGCusage */,
9423                  false /* recordAccumulatedGCTime */,
9424                  true  /* recordGCEndTime */,
9425                  true  /* countCollection */  );
9426       break;
9427 
9428     default:
9429       ShouldNotReachHere();
9430   }
9431 }
9432