/*
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoaderData.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp"
#include "gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.hpp"
#include "gc_implementation/concurrentMarkSweep/cmsGCAdaptivePolicyCounters.hpp"
#include "gc_implementation/concurrentMarkSweep/cmsOopClosures.inline.hpp"
#include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp"
#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp"
#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
#include "gc_implementation/concurrentMarkSweep/vmCMSOperations.hpp"
#include "gc_implementation/parNew/parNewGeneration.hpp"
#include "gc_implementation/shared/collectorCounters.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "gc_implementation/shared/isGCActiveMark.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "memory/allocation.hpp"
#include "memory/cardTableRS.hpp"
#include "memory/collectorPolicy.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/genMarkSweep.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/iterator.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/resourceArea.hpp"
#include "memory/tenuredGeneration.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/vmThread.hpp"
#include "services/memoryService.hpp"
#include "services/runtimeService.hpp"

// statics
CMSCollector* ConcurrentMarkSweepGeneration::_collector = NULL;
bool CMSCollector::_full_gc_requested = false;
GCCause::Cause CMSCollector::_full_gc_cause = GCCause::_no_gc;

//////////////////////////////////////////////////////////////////
// In support of CMS/VM thread synchronization
//////////////////////////////////////////////////////////////////
// We split use of the CGC_lock into 2 "levels".
// The low-level locking is of the usual CGC_lock monitor. We introduce
// a higher level "token" (hereafter "CMS token") built on top of the
// low level monitor (hereafter "CGC lock").
// The token-passing protocol gives priority to the VM thread. The
// CMS-lock doesn't provide any fairness guarantees, but clients
// should ensure that it is only held for very short, bounded
// durations.
//
// When either of the CMS thread or the VM thread is involved in
// collection operations during which it does not want the other
// thread to interfere, it obtains the CMS token.
//
// If either thread tries to get the token while the other has
// it, that thread waits. However, if the VM thread and CMS thread
// both want the token, then the VM thread gets priority while the
// CMS thread waits. This ensures, for instance, that the "concurrent"
// phases of the CMS thread's work do not block out the VM thread
// for long periods of time as the CMS thread continues to hog
// the token. (See bug 4616232).
//
// The baton-passing functions are, however, controlled by the
// flags _foregroundGCShouldWait and _foregroundGCIsActive,
// and here the low-level CMS lock, not the high level token,
// ensures mutual exclusion.
//
// Two important conditions that we have to satisfy:
// 1. if a thread does a low-level wait on the CMS lock, then it
//    relinquishes the CMS token if it were holding that token
//    when it acquired the low-level CMS lock.
// 2. any low-level notifications on the low-level lock
//    should only be sent when a thread has relinquished the token.
//
// In the absence of either property, we'd have potential deadlock.
//
// We protect each of the CMS (concurrent and sequential) phases
// with the CMS _token_, not the CMS _lock_.
//
// The only code protected by CMS lock is the token acquisition code
// itself, see ConcurrentMarkSweepThread::[de]synchronize(), and the
// baton-passing code.
//
// Unfortunately, I couldn't come up with a good abstraction to factor and
// hide the naked CGC_lock manipulation in the baton-passing code
// further below. That's something we should try to do. Also, the proof
// of correctness of this 2-level locking scheme is far from obvious,
// and potentially quite slippery. We have an uneasy suspicion, for instance,
// that there may be a theoretical possibility of delay/starvation in the
// low-level lock/wait/notify scheme used for the baton-passing because of
// potential interference with the priority scheme embodied in the
// CMS-token-passing protocol. See related comments at a CGC_lock->wait()
// invocation further below and marked with "XXX 20011219YSR".
// Indeed, as we note elsewhere, this may become yet more slippery
// in the presence of multiple CMS and/or multiple VM threads. XXX

class CMSTokenSync: public StackObj {
 private:
  bool _is_cms_thread;
 public:
  CMSTokenSync(bool is_cms_thread):
    _is_cms_thread(is_cms_thread) {
    assert(is_cms_thread == Thread::current()->is_ConcurrentGC_thread(),
           "Incorrect argument to constructor");
    ConcurrentMarkSweepThread::synchronize(_is_cms_thread);
  }

  ~CMSTokenSync() {
    assert(_is_cms_thread ?
             ConcurrentMarkSweepThread::cms_thread_has_cms_token() :
             ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
          "Incorrect state");
    ConcurrentMarkSweepThread::desynchronize(_is_cms_thread);
  }
};

// Convenience class that does a CMSTokenSync, and then acquires
// up to three locks.
class CMSTokenSyncWithLocks: public CMSTokenSync {
 private:
  // Note: locks are acquired in textual declaration order
  // and released in the opposite order
  MutexLockerEx _locker1, _locker2, _locker3;
 public:
  CMSTokenSyncWithLocks(bool is_cms_thread, Mutex* mutex1,
                        Mutex* mutex2 = NULL, Mutex* mutex3 = NULL):
    CMSTokenSync(is_cms_thread),
    _locker1(mutex1, Mutex::_no_safepoint_check_flag),
    _locker2(mutex2, Mutex::_no_safepoint_check_flag),
    _locker3(mutex3, Mutex::_no_safepoint_check_flag)
  { }
};
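
// Illustrative sketch (for exposition only; not called anywhere, and
// "some_lock" below stands for whatever mutex a phase actually needs):
// a CMS-thread phase takes the token and up to three locks for the
// duration of one scope, relying on the destructors to release them in
// the reverse order:
//
//   {
//     CMSTokenSyncWithLocks ts(true /* is_cms_thread */, some_lock);
//     // ... work that must exclude the VM thread ...
//   } // locks released, then the CMS token is relinquished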


// Wrapper class to temporarily disable icms during a foreground cms collection.
class ICMSDisabler: public StackObj {
 public:
  // The ctor disables icms and wakes up the thread so it notices the change;
  // the dtor re-enables icms.  Note that the CMSCollector methods will check
  // CMSIncrementalMode.
  ICMSDisabler()  { CMSCollector::disable_icms(); CMSCollector::start_icms(); }
  ~ICMSDisabler() { CMSCollector::enable_icms(); }
};
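
// Illustrative sketch (for exposition only): a foreground collection
// would disable icms for exactly the scope of the collection:
//
//   {
//     ICMSDisabler disabler; // icms disabled; icms thread woken to notice
//     // ... run the foreground (stop-world) collection ...
//   }                        // icms re-enabled by the destructor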

//////////////////////////////////////////////////////////////////
//  Concurrent Mark-Sweep Generation /////////////////////////////
//////////////////////////////////////////////////////////////////

NOT_PRODUCT(CompactibleFreeListSpace* debug_cms_space;)

// This struct contains per-thread things necessary to support parallel
// young-gen collection.
class CMSParGCThreadState: public CHeapObj<mtGC> {
 public:
  CFLS_LAB lab;
  PromotionInfo promo;

  // Constructor.
  CMSParGCThreadState(CompactibleFreeListSpace* cfls) : lab(cfls) {
    promo.setSpace(cfls);
  }
};

ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
     ReservedSpace rs, size_t initial_byte_size, int level,
     CardTableRS* ct, bool use_adaptive_freelists,
     FreeBlockDictionary<FreeChunk>::DictionaryChoice dictionaryChoice) :
  CardGeneration(rs, initial_byte_size, level, ct),
  _dilatation_factor(((double)MinChunkSize)/((double)(CollectedHeap::min_fill_size()))),
  _debug_collection_type(Concurrent_collection_type),
  _did_compact(false)
{
  HeapWord* bottom = (HeapWord*) _virtual_space.low();
  HeapWord* end    = (HeapWord*) _virtual_space.high();

  _direct_allocated_words = 0;
  NOT_PRODUCT(
    _numObjectsPromoted = 0;
    _numWordsPromoted = 0;
    _numObjectsAllocated = 0;
    _numWordsAllocated = 0;
  )

  _cmsSpace = new CompactibleFreeListSpace(_bts, MemRegion(bottom, end),
                                           use_adaptive_freelists,
                                           dictionaryChoice);
  NOT_PRODUCT(debug_cms_space = _cmsSpace;)
  if (_cmsSpace == NULL) {
    vm_exit_during_initialization(
      "CompactibleFreeListSpace allocation failure");
  }
  _cmsSpace->_gen = this;

  _gc_stats = new CMSGCStats();

  // Verify the assumption that FreeChunk::_prev and OopDesc::_klass
  // offsets match. The ability to tell free chunks from objects
  // depends on this property.
  debug_only(
    FreeChunk* junk = NULL;
    assert(UseCompressedKlassPointers ||
           junk->prev_addr() == (void*)(oop(junk)->klass_addr()),
           "Offset of FreeChunk::_prev within FreeChunk must match"
           "  that of OopDesc::_klass within OopDesc");
  )
  if (CollectedHeap::use_parallel_gc_threads()) {
    typedef CMSParGCThreadState* CMSParGCThreadStatePtr;
    _par_gc_thread_states =
      NEW_C_HEAP_ARRAY(CMSParGCThreadStatePtr, ParallelGCThreads, mtGC);
    if (_par_gc_thread_states == NULL) {
      vm_exit_during_initialization("Could not allocate par gc structs");
    }
    for (uint i = 0; i < ParallelGCThreads; i++) {
      _par_gc_thread_states[i] = new CMSParGCThreadState(cmsSpace());
      if (_par_gc_thread_states[i] == NULL) {
        vm_exit_during_initialization("Could not allocate par gc structs");
      }
    }
  } else {
    _par_gc_thread_states = NULL;
  }
  _incremental_collection_failed = false;
  // The "dilatation_factor" is the expansion that can occur on
  // account of the fact that the minimum object size in the CMS
  // generation may be larger than that in, say, a contiguous young
  // generation.
  // Ideally, in the calculation below, we'd compute the dilatation
  // factor as: MinChunkSize/(promoting_gen's min object size)
  // Since we do not have such a general query interface for the
  // promoting generation, we'll instead just use the minimum
  // object size (which today is a header's worth of space);
  // note that all arithmetic is in units of HeapWords.
  assert(MinChunkSize >= CollectedHeap::min_fill_size(), "just checking");
  assert(_dilatation_factor >= 1.0, "from previous assert");
}


// The field "_initiating_occupancy" represents the occupancy percentage
// at which we trigger a new collection cycle.  Unless explicitly specified
// via CMSInitiatingOccupancyFraction (argument "io" below), it
// is calculated by:
//
//   Let "f" be MinHeapFreeRatio in
//
//    _initiating_occupancy = 100-f +
//                           f * (CMSTriggerRatio/100)
//   where CMSTriggerRatio is the argument "tr" below.
//
// That is, if we assume the heap is at its desired maximum occupancy at the
// end of a collection, we let CMSTriggerRatio of the (purported) free
// space be allocated before initiating a new collection cycle.
//
void ConcurrentMarkSweepGeneration::init_initiating_occupancy(intx io, uintx tr) {
  assert(io <= 100 && tr <= 100, "Check the arguments");
  if (io >= 0) {
    _initiating_occupancy = (double)io / 100.0;
  } else {
    _initiating_occupancy = ((100 - MinHeapFreeRatio) +
                             (double)(tr * MinHeapFreeRatio) / 100.0)
                            / 100.0;
  }
}
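
// Worked example (values for illustration only): with io < 0,
// MinHeapFreeRatio = 40 and CMSTriggerRatio = 80, the else branch computes
//   _initiating_occupancy = ((100 - 40) + (80 * 40) / 100.0) / 100.0
//                         = (60 + 32) / 100.0 = 0.92
// i.e. a new cycle is triggered once the generation is 92% occupied.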

void ConcurrentMarkSweepGeneration::ref_processor_init() {
  assert(collector() != NULL, "no collector");
  collector()->ref_processor_init();
}

void CMSCollector::ref_processor_init() {
  if (_ref_processor == NULL) {
    // Allocate and initialize a reference processor
    _ref_processor =
      new ReferenceProcessor(_span,                               // span
                             (ParallelGCThreads > 1) && ParallelRefProcEnabled, // mt processing
                             (int) ParallelGCThreads,             // mt processing degree
                             _cmsGen->refs_discovery_is_mt(),     // mt discovery
                             (int) MAX2(ConcGCThreads, ParallelGCThreads), // mt discovery degree
                             _cmsGen->refs_discovery_is_atomic(), // whether discovery is atomic
                             &_is_alive_closure,                  // closure for liveness info
                             false);                              // next field updates do not need write barrier
    // Initialize the _ref_processor field of CMSGen
    _cmsGen->set_ref_processor(_ref_processor);

  }
}

CMSAdaptiveSizePolicy* CMSCollector::size_policy() {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  assert(gch->kind() == CollectedHeap::GenCollectedHeap,
    "Wrong type of heap");
  CMSAdaptiveSizePolicy* sp = (CMSAdaptiveSizePolicy*)
    gch->gen_policy()->size_policy();
  assert(sp->is_gc_cms_adaptive_size_policy(),
    "Wrong type of size policy");
  return sp;
}

CMSGCAdaptivePolicyCounters* CMSCollector::gc_adaptive_policy_counters() {
  CMSGCAdaptivePolicyCounters* results =
    (CMSGCAdaptivePolicyCounters*) collector_policy()->counters();
  assert(
    results->kind() == GCPolicyCounters::CMSGCAdaptivePolicyCountersKind,
    "Wrong gc policy counter kind");
  return results;
}


void ConcurrentMarkSweepGeneration::initialize_performance_counters() {

  const char* gen_name = "old";

  // Generation Counters - generation 1, 1 subspace
  _gen_counters = new GenerationCounters(gen_name, 1, 1, &_virtual_space);

  _space_counters = new GSpaceCounters(gen_name, 0,
                                       _virtual_space.reserved_size(),
                                       this, _gen_counters);
}

CMSStats::CMSStats(ConcurrentMarkSweepGeneration* cms_gen, unsigned int alpha):
  _cms_gen(cms_gen)
{
  assert(alpha <= 100, "bad value");
  _saved_alpha = alpha;

  // Initialize the alphas to the bootstrap value of 100.
  _gc0_alpha = _cms_alpha = 100;

  _cms_begin_time.update();
  _cms_end_time.update();

  _gc0_duration = 0.0;
  _gc0_period = 0.0;
  _gc0_promoted = 0;

  _cms_duration = 0.0;
  _cms_period = 0.0;
  _cms_allocated = 0;

  _cms_used_at_gc0_begin = 0;
  _cms_used_at_gc0_end = 0;
  _allow_duty_cycle_reduction = false;
  _valid_bits = 0;
  _icms_duty_cycle = CMSIncrementalDutyCycle;
}

double CMSStats::cms_free_adjustment_factor(size_t free) const {
  // TBD: CR 6909490
  return 1.0;
}

void CMSStats::adjust_cms_free_adjustment_factor(bool fail, size_t free) {
}

// If promotion failure handling is on, use
// the padded average size of the promotion for each
// young generation collection.
double CMSStats::time_until_cms_gen_full() const {
  size_t cms_free = _cms_gen->cmsSpace()->free();
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  size_t expected_promotion = MIN2(gch->get_gen(0)->capacity(),
                                   (size_t) _cms_gen->gc_stats()->avg_promoted()->padded_average());
  if (cms_free > expected_promotion) {
    // Start a cms collection if there isn't enough space to promote
    // for the next minor collection.  Use the padded average as
    // a safety factor.
    cms_free -= expected_promotion;

    // Adjust by the safety factor.
    double cms_free_dbl = (double)cms_free;
    double cms_adjustment = (100.0 - CMSIncrementalSafetyFactor)/100.0;
    // Apply a further correction factor which tries to adjust
    // for recent occurrences of concurrent mode failures.
    cms_adjustment = cms_adjustment * cms_free_adjustment_factor(cms_free);
    cms_free_dbl = cms_free_dbl * cms_adjustment;

    if (PrintGCDetails && Verbose) {
      gclog_or_tty->print_cr("CMSStats::time_until_cms_gen_full: cms_free "
        SIZE_FORMAT " expected_promotion " SIZE_FORMAT,
        cms_free, expected_promotion);
      gclog_or_tty->print_cr("  cms_free_dbl %f cms_consumption_rate %f",
        cms_free_dbl, cms_consumption_rate() + 1.0);
    }
    // Add 1 in case the consumption rate goes to zero.
    return cms_free_dbl / (cms_consumption_rate() + 1.0);
  }
  return 0.0;
}
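
// Worked example (illustrative numbers only): with cms_free = 300M,
// expected_promotion = 50M and CMSIncrementalSafetyFactor = 10, the
// headroom is (300M - 50M) * (100 - 10)/100 = 225M; with
// cms_consumption_rate() = 10M/sec this returns roughly
// 225M / (10M + 1.0) ~= 22.5 seconds until the generation fills up.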

// Compare the duration of the cms collection to the
// time remaining before the cms generation runs out of space.
// Note that the time from the start of the cms collection
// to the start of the cms sweep (less than the total
// duration of the cms collection) can be used.  This
// has been tried and some applications experienced
// promotion failures early in execution.  This was
// possibly because the averages were not accurate
// enough at the beginning.
double CMSStats::time_until_cms_start() const {
  // We add "gc0_period" to the "work" calculation
  // below because this query is done (mostly) at the
  // end of a scavenge, so we need to conservatively
  // account for that much possible delay
  // in the query so as to avoid concurrent mode failures
  // due to starting the collection just a wee bit too
  // late.
  double work = cms_duration() + gc0_period();
  double deadline = time_until_cms_gen_full();
  // If a concurrent mode failure occurred recently, we want to be
  // more conservative and halve our expected time_until_cms_gen_full()
  if (work > deadline) {
    if (Verbose && PrintGCDetails) {
      gclog_or_tty->print(
        " CMSCollector: collect because of anticipated promotion "
        "before full %3.7f + %3.7f > %3.7f ", cms_duration(),
        gc0_period(), time_until_cms_gen_full());
    }
    return 0.0;
  }
  return deadline - work;
}
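
// Worked example (illustrative numbers only): with cms_duration() = 2.0s,
// gc0_period() = 1.0s and time_until_cms_gen_full() = 10.0s, the work
// estimate is 3.0s against a 10.0s deadline, so the start of the next CMS
// cycle can be deferred for another 10.0 - 3.0 = 7.0 seconds.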

// Return a duty cycle based on old_duty_cycle and new_duty_cycle, limiting the
// amount of change to prevent wild oscillation.
unsigned int CMSStats::icms_damped_duty_cycle(unsigned int old_duty_cycle,
                                              unsigned int new_duty_cycle) {
  assert(old_duty_cycle <= 100, "bad input value");
  assert(new_duty_cycle <= 100, "bad input value");

  // Note:  use subtraction with caution since it may underflow (values are
  // unsigned).  Addition is safe since we're in the range 0-100.
  unsigned int damped_duty_cycle = new_duty_cycle;
  if (new_duty_cycle < old_duty_cycle) {
    const unsigned int largest_delta = MAX2(old_duty_cycle / 4, 5U);
    if (new_duty_cycle + largest_delta < old_duty_cycle) {
      damped_duty_cycle = old_duty_cycle - largest_delta;
    }
  } else if (new_duty_cycle > old_duty_cycle) {
    const unsigned int largest_delta = MAX2(old_duty_cycle / 4, 15U);
    if (new_duty_cycle > old_duty_cycle + largest_delta) {
      damped_duty_cycle = MIN2(old_duty_cycle + largest_delta, 100U);
    }
  }
  assert(damped_duty_cycle <= 100, "invalid duty cycle computed");

  if (CMSTraceIncrementalPacing) {
    gclog_or_tty->print(" [icms_damped_duty_cycle(%d,%d) = %d] ",
                           old_duty_cycle, new_duty_cycle, damped_duty_cycle);
  }
  return damped_duty_cycle;
}
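
// Worked example (illustrative numbers only): for a requested decrease from
// old_duty_cycle = 60 to new_duty_cycle = 20, largest_delta =
// MAX2(60 / 4, 5U) = 15 and 20 + 15 < 60, so the result is damped to
// 60 - 15 = 45 rather than dropping straight to 20. An increase from 60 to
// 70 is within MAX2(60 / 4, 15U) = 15 of the old value and is used as-is.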

unsigned int CMSStats::icms_update_duty_cycle_impl() {
  assert(CMSIncrementalPacing && valid(),
         "should be handled in icms_update_duty_cycle()");

  double cms_time_so_far = cms_timer().seconds();
  double scaled_duration = cms_duration_per_mb() * _cms_used_at_gc0_end / M;
  double scaled_duration_remaining = fabsd(scaled_duration - cms_time_so_far);

  // Avoid division by 0.
  double time_until_full = MAX2(time_until_cms_gen_full(), 0.01);
  double duty_cycle_dbl = 100.0 * scaled_duration_remaining / time_until_full;

  unsigned int new_duty_cycle = MIN2((unsigned int)duty_cycle_dbl, 100U);
  if (new_duty_cycle > _icms_duty_cycle) {
    // Avoid very small duty cycles (1 or 2); 0 is allowed.
    if (new_duty_cycle > 2) {
      _icms_duty_cycle = icms_damped_duty_cycle(_icms_duty_cycle,
                                                new_duty_cycle);
    }
  } else if (_allow_duty_cycle_reduction) {
    // The duty cycle is reduced only once per cms cycle (see record_cms_end()).
    new_duty_cycle = icms_damped_duty_cycle(_icms_duty_cycle, new_duty_cycle);
    // Respect the minimum duty cycle.
    unsigned int min_duty_cycle = (unsigned int)CMSIncrementalDutyCycleMin;
    _icms_duty_cycle = MAX2(new_duty_cycle, min_duty_cycle);
  }

  if (PrintGCDetails || CMSTraceIncrementalPacing) {
    gclog_or_tty->print(" icms_dc=%d ", _icms_duty_cycle);
  }

  _allow_duty_cycle_reduction = false;
  return _icms_duty_cycle;
}

#ifndef PRODUCT
void CMSStats::print_on(outputStream *st) const {
  st->print(" gc0_alpha=%d,cms_alpha=%d", _gc0_alpha, _cms_alpha);
  st->print(",gc0_dur=%g,gc0_per=%g,gc0_promo=" SIZE_FORMAT,
               gc0_duration(), gc0_period(), gc0_promoted());
  st->print(",cms_dur=%g,cms_dur_per_mb=%g,cms_per=%g,cms_alloc=" SIZE_FORMAT,
            cms_duration(), cms_duration_per_mb(),
            cms_period(), cms_allocated());
  st->print(",cms_since_beg=%g,cms_since_end=%g",
            cms_time_since_begin(), cms_time_since_end());
  st->print(",cms_used_beg=" SIZE_FORMAT ",cms_used_end=" SIZE_FORMAT,
            _cms_used_at_gc0_begin, _cms_used_at_gc0_end);
  if (CMSIncrementalMode) {
    st->print(",dc=%d", icms_duty_cycle());
  }

  if (valid()) {
    st->print(",promo_rate=%g,cms_alloc_rate=%g",
              promotion_rate(), cms_allocation_rate());
    st->print(",cms_consumption_rate=%g,time_until_full=%g",
              cms_consumption_rate(), time_until_cms_gen_full());
  }
  st->print(" ");
}
#endif // #ifndef PRODUCT

CMSCollector::CollectorState CMSCollector::_collectorState =
                             CMSCollector::Idling;
bool CMSCollector::_foregroundGCIsActive = false;
bool CMSCollector::_foregroundGCShouldWait = false;

CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
                           CardTableRS*                   ct,
                           ConcurrentMarkSweepPolicy*     cp):
  _cmsGen(cmsGen),
  _ct(ct),
  _ref_processor(NULL),    // will be set later
  _conc_workers(NULL),     // may be set later
  _abort_preclean(false),
  _start_sampling(false),
  _between_prologue_and_epilogue(false),
  _markBitMap(0, Mutex::leaf + 1, "CMS_markBitMap_lock"),
  _modUnionTable((CardTableModRefBS::card_shift - LogHeapWordSize),
                 -1 /* lock-free */, "No_lock" /* dummy */),
  _modUnionClosure(&_modUnionTable),
  _modUnionClosurePar(&_modUnionTable),
  // Adjust my span to cover old (cms) gen
  _span(cmsGen->reserved()),
  // Construct the is_alive_closure with _span & markBitMap
  _is_alive_closure(_span, &_markBitMap),
  _restart_addr(NULL),
  _overflow_list(NULL),
  _stats(cmsGen),
  _eden_chunk_lock(new Mutex(Mutex::leaf + 1, "CMS_eden_chunk_lock", true)),
  _eden_chunk_array(NULL),     // may be set in ctor body
  _eden_chunk_capacity(0),     // -- ditto --
  _eden_chunk_index(0),        // -- ditto --
  _survivor_plab_array(NULL),  // -- ditto --
  _survivor_chunk_array(NULL), // -- ditto --
  _survivor_chunk_capacity(0), // -- ditto --
  _survivor_chunk_index(0),    // -- ditto --
  _ser_pmc_preclean_ovflw(0),
  _ser_kac_preclean_ovflw(0),
  _ser_pmc_remark_ovflw(0),
  _par_pmc_remark_ovflw(0),
  _ser_kac_ovflw(0),
  _par_kac_ovflw(0),
#ifndef PRODUCT
  _num_par_pushes(0),
#endif
  _collection_count_start(0),
  _verifying(false),
  _icms_start_limit(NULL),
  _icms_stop_limit(NULL),
  _verification_mark_bm(0, Mutex::leaf + 1, "CMS_verification_mark_bm_lock"),
  _completed_initialization(false),
  _collector_policy(cp),
  _should_unload_classes(false),
  _concurrent_cycles_since_last_unload(0),
  _roots_scanning_options(0),
  _inter_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
  _intra_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
  _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) CMSTracer()),
  _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
  _cms_start_registered(false)
{
  if (ExplicitGCInvokesConcurrentAndUnloadsClasses) {
    ExplicitGCInvokesConcurrent = true;
  }
  // Now expand the span and allocate the collection support structures
  // (MUT, marking bit map etc.) to cover both generations subject to
  // collection.

  // For use by dirty card to oop closures.
  _cmsGen->cmsSpace()->set_collector(this);

  // Allocate MUT and marking bit map
  {
    MutexLockerEx x(_markBitMap.lock(), Mutex::_no_safepoint_check_flag);
    if (!_markBitMap.allocate(_span)) {
      warning("Failed to allocate CMS Bit Map");
      return;
    }
    assert(_markBitMap.covers(_span), "_markBitMap inconsistency?");
  }
  {
    _modUnionTable.allocate(_span);
    assert(_modUnionTable.covers(_span), "_modUnionTable inconsistency?");
  }

  if (!_markStack.allocate(MarkStackSize)) {
    warning("Failed to allocate CMS Marking Stack");
    return;
  }

  // Support for multi-threaded concurrent phases
  if (CMSConcurrentMTEnabled) {
    if (FLAG_IS_DEFAULT(ConcGCThreads)) {
      // just for now
      FLAG_SET_DEFAULT(ConcGCThreads, (ParallelGCThreads + 3)/4);
    }
    if (ConcGCThreads > 1) {
      _conc_workers = new YieldingFlexibleWorkGang("Parallel CMS Threads",
                                 ConcGCThreads, true);
      if (_conc_workers == NULL) {
        warning("GC/CMS: _conc_workers allocation failure: "
              "forcing -CMSConcurrentMTEnabled");
        CMSConcurrentMTEnabled = false;
      } else {
        _conc_workers->initialize_workers();
      }
    } else {
      CMSConcurrentMTEnabled = false;
    }
  }
  if (!CMSConcurrentMTEnabled) {
    ConcGCThreads = 0;
  } else {
    // Turn off CMSCleanOnEnter optimization temporarily for
    // the MT case where it's not fixed yet; see 6178663.
    CMSCleanOnEnter = false;
  }
  assert((_conc_workers != NULL) == (ConcGCThreads > 1),
         "Inconsistency");

  // Parallel task queues; these are shared for the
  // concurrent and stop-world phases of CMS, but
  // are not shared with parallel scavenge (ParNew).
  {
    uint i;
    uint num_queues = (uint) MAX2(ParallelGCThreads, ConcGCThreads);

    if ((CMSParallelRemarkEnabled || CMSConcurrentMTEnabled
         || ParallelRefProcEnabled)
        && num_queues > 0) {
      _task_queues = new OopTaskQueueSet(num_queues);
      if (_task_queues == NULL) {
        warning("task_queues allocation failure.");
        return;
      }
      _hash_seed = NEW_C_HEAP_ARRAY(int, num_queues, mtGC);
      if (_hash_seed == NULL) {
        warning("_hash_seed array allocation failure");
        return;
      }

      typedef Padded<OopTaskQueue> PaddedOopTaskQueue;
      for (i = 0; i < num_queues; i++) {
        PaddedOopTaskQueue *q = new PaddedOopTaskQueue();
        if (q == NULL) {
          warning("work_queue allocation failure.");
          return;
        }
        _task_queues->register_queue(i, q);
      }
      for (i = 0; i < num_queues; i++) {
        _task_queues->queue(i)->initialize();
        _hash_seed[i] = 17;  // copied from ParNew
      }
    }
  }

  _cmsGen->init_initiating_occupancy(CMSInitiatingOccupancyFraction, CMSTriggerRatio);

  // Clip CMSBootstrapOccupancy between 0 and 100.
  _bootstrap_occupancy = ((double)CMSBootstrapOccupancy)/(double)100;

  _full_gcs_since_conc_gc = 0;

  // Now tell CMS generations the identity of their collector
  ConcurrentMarkSweepGeneration::set_collector(this);

  // Create & start a CMS thread for this CMS collector
  _cmsThread = ConcurrentMarkSweepThread::start(this);
  assert(cmsThread() != NULL, "CMS Thread should have been created");
  assert(cmsThread()->collector() == this,
         "CMS Thread should refer to this gen");
  assert(CGC_lock != NULL, "Where's the CGC_lock?");

  // Support for parallelizing young gen rescan
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  _young_gen = gch->prev_gen(_cmsGen);
  if (gch->supports_inline_contig_alloc()) {
    _top_addr = gch->top_addr();
    _end_addr = gch->end_addr();
    assert(_young_gen != NULL, "no _young_gen");
    _eden_chunk_index = 0;
    _eden_chunk_capacity = (_young_gen->max_capacity()+CMSSamplingGrain)/CMSSamplingGrain;
    _eden_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, _eden_chunk_capacity, mtGC);
    if (_eden_chunk_array == NULL) {
      _eden_chunk_capacity = 0;
      warning("GC/CMS: _eden_chunk_array allocation failure");
    }
  }
  assert(_eden_chunk_array != NULL || _eden_chunk_capacity == 0, "Error");

  // Support for parallelizing survivor space rescan
  if ((CMSParallelRemarkEnabled && CMSParallelSurvivorRemarkEnabled) || CMSParallelInitialMarkEnabled) {
    const size_t max_plab_samples =
      ((DefNewGeneration*)_young_gen)->max_survivor_size()/MinTLABSize;

    _survivor_plab_array  = NEW_C_HEAP_ARRAY(ChunkArray, ParallelGCThreads, mtGC);
    _survivor_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, 2*max_plab_samples, mtGC);
    _cursor               = NEW_C_HEAP_ARRAY(size_t, ParallelGCThreads, mtGC);
    if (_survivor_plab_array == NULL || _survivor_chunk_array == NULL
        || _cursor == NULL) {
      warning("Failed to allocate survivor plab/chunk array");
      if (_survivor_plab_array  != NULL) {
        FREE_C_HEAP_ARRAY(ChunkArray, _survivor_plab_array, mtGC);
        _survivor_plab_array = NULL;
      }
      if (_survivor_chunk_array != NULL) {
        FREE_C_HEAP_ARRAY(HeapWord*, _survivor_chunk_array, mtGC);
        _survivor_chunk_array = NULL;
      }
      if (_cursor != NULL) {
        FREE_C_HEAP_ARRAY(size_t, _cursor, mtGC);
        _cursor = NULL;
      }
    } else {
      _survivor_chunk_capacity = 2*max_plab_samples;
      for (uint i = 0; i < ParallelGCThreads; i++) {
        HeapWord** vec = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples, mtGC);
        if (vec == NULL) {
          warning("Failed to allocate survivor plab array");
          for (int j = i; j > 0; j--) {
            FREE_C_HEAP_ARRAY(HeapWord*, _survivor_plab_array[j-1].array(), mtGC);
          }
          FREE_C_HEAP_ARRAY(ChunkArray, _survivor_plab_array, mtGC);
          FREE_C_HEAP_ARRAY(HeapWord*, _survivor_chunk_array, mtGC);
          _survivor_plab_array = NULL;
          _survivor_chunk_array = NULL;
          _survivor_chunk_capacity = 0;
          break;
        } else {
          ChunkArray* cur =
            ::new (&_survivor_plab_array[i]) ChunkArray(vec,
                                                        max_plab_samples);
          assert(cur->end() == 0, "Should be 0");
          assert(cur->array() == vec, "Should be vec");
          assert(cur->capacity() == max_plab_samples, "Error");
        }
      }
    }
  }
  assert(   (   _survivor_plab_array  != NULL
             && _survivor_chunk_array != NULL)
         || (   _survivor_chunk_capacity == 0
             && _survivor_chunk_index == 0),
         "Error");

  // Choose what strong roots should be scanned depending on verification options
  if (!CMSClassUnloadingEnabled) {
    // If class unloading is disabled we want to include all classes into the root set.
    add_root_scanning_option(SharedHeap::SO_AllClasses);
  } else {
    add_root_scanning_option(SharedHeap::SO_SystemClasses);
  }

  NOT_PRODUCT(_overflow_counter = CMSMarkStackOverflowInterval;)
  _gc_counters = new CollectorCounters("CMS", 1);
  _completed_initialization = true;
  _inter_sweep_timer.start();  // start of time
}

const char* ConcurrentMarkSweepGeneration::name() const {
  return "concurrent mark-sweep generation";
}
void ConcurrentMarkSweepGeneration::update_counters() {
  if (UsePerfData) {
    _space_counters->update_all();
    _gen_counters->update_all();
  }
}

// This is an optimized version of update_counters(). It takes the
// used value as a parameter rather than computing it.
//
void ConcurrentMarkSweepGeneration::update_counters(size_t used) {
  if (UsePerfData) {
    _space_counters->update_used(used);
    _space_counters->update_capacity();
    _gen_counters->update_all();
  }
}

void ConcurrentMarkSweepGeneration::print() const {
  Generation::print();
  cmsSpace()->print();
}

#ifndef PRODUCT
void ConcurrentMarkSweepGeneration::print_statistics() {
  cmsSpace()->printFLCensus(0);
}
#endif

void ConcurrentMarkSweepGeneration::printOccupancy(const char *s) {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  if (PrintGCDetails) {
    if (Verbose) {
      gclog_or_tty->print("[%d %s-%s: "SIZE_FORMAT"("SIZE_FORMAT")]",
        level(), short_name(), s, used(), capacity());
    } else {
      gclog_or_tty->print("[%d %s-%s: "SIZE_FORMAT"K("SIZE_FORMAT"K)]",
        level(), short_name(), s, used() / K, capacity() / K);
    }
  }
  if (Verbose) {
    gclog_or_tty->print(" "SIZE_FORMAT"("SIZE_FORMAT")",
              gch->used(), gch->capacity());
  } else {
    gclog_or_tty->print(" "SIZE_FORMAT"K("SIZE_FORMAT"K)",
              gch->used() / K, gch->capacity() / K);
  }
}

size_t
ConcurrentMarkSweepGeneration::contiguous_available() const {
  // dld proposes an improvement in precision here. If the committed
  // part of the space ends in a free block we should add that to
  // uncommitted size in the calculation below. Will make this
  // change later, staying with the approximation below for the
  // time being. -- ysr.
  return MAX2(_virtual_space.uncommitted_size(), unsafe_max_alloc_nogc());
}

size_t
ConcurrentMarkSweepGeneration::unsafe_max_alloc_nogc() const {
  return _cmsSpace->max_alloc_in_words() * HeapWordSize;
}

size_t ConcurrentMarkSweepGeneration::max_available() const {
  return free() + _virtual_space.uncommitted_size();
}

bool ConcurrentMarkSweepGeneration::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
  size_t available = max_available();
  size_t av_promo  = (size_t)gc_stats()->avg_promoted()->padded_average();
  bool   res = (available >= av_promo) || (available >= max_promotion_in_bytes);
  if (Verbose && PrintGCDetails) {
    gclog_or_tty->print_cr(
      "CMS: promo attempt is%s safe: available("SIZE_FORMAT") %s av_promo("SIZE_FORMAT"),"
      "max_promo("SIZE_FORMAT")",
      res? "":" not", available, res? ">=":"<",
      av_promo, max_promotion_in_bytes);
  }
  return res;
}
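
// Worked example (illustrative numbers only): with max_available() = 64M,
// a padded average promotion of 8M and max_promotion_in_bytes = 128M, the
// first disjunct (64M >= 8M) already makes the attempt "safe", even though
// a worst-case promotion of max_promotion_in_bytes would not fit.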

// At a promotion failure dump information on block layout in heap
// (cms old generation).
void ConcurrentMarkSweepGeneration::promotion_failure_occurred() {
  if (CMSDumpAtPromotionFailure) {
    cmsSpace()->dump_at_safepoint_with_locks(collector(), gclog_or_tty);
  }
}

CompactibleSpace*
ConcurrentMarkSweepGeneration::first_compaction_space() const {
  return _cmsSpace;
}

void ConcurrentMarkSweepGeneration::reset_after_compaction() {
  // Clear the promotion information.  These pointers can be adjusted
  // along with all the other pointers into the heap, but compaction
  // is expected to be a rare event for a heap using CMS, so don't do
  // it without seeing the need.
  if (CollectedHeap::use_parallel_gc_threads()) {
    for (uint i = 0; i < ParallelGCThreads; i++) {
      _par_gc_thread_states[i]->promo.reset();
    }
  }
}

void ConcurrentMarkSweepGeneration::space_iterate(SpaceClosure* blk, bool usedOnly) {
  blk->do_space(_cmsSpace);
}

void ConcurrentMarkSweepGeneration::compute_new_size() {
  assert_locked_or_safepoint(Heap_lock);

  // If incremental collection failed, we just want to expand
  // to the limit.
  if (incremental_collection_failed()) {
    clear_incremental_collection_failed();
    grow_to_reserved();
    return;
  }

  // The heap has been compacted but not reset yet.
  // Any metric such as free() or used() will be incorrect.

  CardGeneration::compute_new_size();

  // Reset again after a possible resizing
  if (did_compact()) {
    cmsSpace()->reset_after_compaction();
  }
}

void ConcurrentMarkSweepGeneration::compute_new_size_free_list() {
  assert_locked_or_safepoint(Heap_lock);

  // If incremental collection failed, we just want to expand
  // to the limit.
  if (incremental_collection_failed()) {
    clear_incremental_collection_failed();
    grow_to_reserved();
    return;
  }

  double free_percentage = ((double) free()) / capacity();
  double desired_free_percentage = (double) MinHeapFreeRatio / 100;
  double maximum_free_percentage = (double) MaxHeapFreeRatio / 100;

  // compute expansion delta needed for reaching desired free percentage
  if (free_percentage < desired_free_percentage) {
    size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
    assert(desired_capacity >= capacity(), "invalid expansion size");
    size_t expand_bytes = MAX2(desired_capacity - capacity(), MinHeapDeltaBytes);
    if (PrintGCDetails && Verbose) {
      size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
      gclog_or_tty->print_cr("\nFrom compute_new_size: ");
      gclog_or_tty->print_cr("  Free fraction %f", free_percentage);
      gclog_or_tty->print_cr("  Desired free fraction %f",
        desired_free_percentage);
      gclog_or_tty->print_cr("  Maximum free fraction %f",
        maximum_free_percentage);
      gclog_or_tty->print_cr("  Capacity "SIZE_FORMAT, capacity()/1000);
      gclog_or_tty->print_cr("  Desired capacity "SIZE_FORMAT,
        desired_capacity/1000);
      int prev_level = level() - 1;
      if (prev_level >= 0) {
        size_t prev_size = 0;
        GenCollectedHeap* gch = GenCollectedHeap::heap();
        Generation* prev_gen = gch->_gens[prev_level];
        prev_size = prev_gen->capacity();
        gclog_or_tty->print_cr("  Younger gen size "SIZE_FORMAT,
                               prev_size/1000);
      }
      gclog_or_tty->print_cr("  unsafe_max_alloc_nogc "SIZE_FORMAT,
        unsafe_max_alloc_nogc()/1000);
      gclog_or_tty->print_cr("  contiguous available "SIZE_FORMAT,
        contiguous_available()/1000);
      gclog_or_tty->print_cr("  Expand by "SIZE_FORMAT" (bytes)",
        expand_bytes);
    }
    // safe if expansion fails
    expand(expand_bytes, 0, CMSExpansionCause::_satisfy_free_ratio);
    if (PrintGCDetails && Verbose) {
      gclog_or_tty->print_cr("  Expanded free fraction %f",
        ((double) free()) / capacity());
    }
  } else {
    size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
    assert(desired_capacity <= capacity(), "invalid expansion size");
    size_t shrink_bytes = capacity() - desired_capacity;
    // Don't shrink unless the delta is greater than the minimum shrink we want
    if (shrink_bytes >= MinHeapDeltaBytes) {
      shrink_free_list_by(shrink_bytes);
    }
  }
}
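
// Worked example (illustrative numbers only): with used() = 600M,
// capacity() = 700M and MinHeapFreeRatio = 25, free_percentage ~= 0.14
// falls short of the desired 0.25, so desired_capacity =
// 600M / (1 - 0.25) = 800M and the generation expands by
// MAX2(800M - 700M, MinHeapDeltaBytes) = 100M bytes.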

Mutex* ConcurrentMarkSweepGeneration::freelistLock() const {
  return cmsSpace()->freelistLock();
}

HeapWord* ConcurrentMarkSweepGeneration::allocate(size_t size,
                                                  bool   tlab) {
  CMSSynchronousYieldRequest yr;
  MutexLockerEx x(freelistLock(),
                  Mutex::_no_safepoint_check_flag);
  return have_lock_and_allocate(size, tlab);
}

HeapWord* ConcurrentMarkSweepGeneration::have_lock_and_allocate(size_t size,
                                                  bool   tlab /* ignored */) {
  assert_lock_strong(freelistLock());
  size_t adjustedSize = CompactibleFreeListSpace::adjustObjectSize(size);
  HeapWord* res = cmsSpace()->allocate(adjustedSize);
  // Allocate the object live (grey) if the background collector has
  // started marking. This is necessary because the marker may
  // have passed this address and consequently this object will
  // not otherwise be greyed and would be incorrectly swept up.
  // Note that if this object contains references, the writing
  // of those references will dirty the card containing this object
  // allowing the object to be blackened (and its references scanned)
  // either during a preclean phase or at the final checkpoint.
  if (res != NULL) {
    // We may block here with an uninitialized object with
    // its mark-bit or P-bits not yet set. Such objects need
    // to be safely navigable by block_start().
    assert(oop(res)->klass_or_null() == NULL, "Object should be uninitialized here.");
    assert(!((FreeChunk*)res)->is_free(), "Error, block will look free but show wrong size");
    collector()->direct_allocated(res, adjustedSize);
    _direct_allocated_words += adjustedSize;
    // allocation counters
    NOT_PRODUCT(
      _numObjectsAllocated++;
      _numWordsAllocated += (int)adjustedSize;
    )
  }
  return res;
}

// In the case of direct allocation by mutators in a generation that
// is being concurrently collected, the object must be allocated
// live (grey) if the background collector has started marking.
// This is necessary because the marker may
// have passed this address and consequently this object will
// not otherwise be greyed and would be incorrectly swept up.
// Note that if this object contains references, the writing
// of those references will dirty the card containing this object
// allowing the object to be blackened (and its references scanned)
// either during a preclean phase or at the final checkpoint.
void CMSCollector::direct_allocated(HeapWord* start, size_t size) {
  assert(_markBitMap.covers(start, size), "Out of bounds");
  if (_collectorState >= Marking) {
    MutexLockerEx y(_markBitMap.lock(),
                    Mutex::_no_safepoint_check_flag);
    // [see comments preceding SweepClosure::do_blk() below for details]
    //
    // Can the P-bits be deleted now?  JJJ
    //
    // 1. need to mark the object as live so it isn't collected
    // 2. need to mark the 2nd bit to indicate the object may be uninitialized
    // 3. need to mark the end of the object so marking, precleaning or sweeping
    //    can skip over uninitialized or unparsable objects. An allocated
    //    object is considered uninitialized for our purposes as long as
    //    its klass word is NULL.  All old gen objects are parsable
    //    as soon as they are initialized.
    _markBitMap.mark(start);          // object is live
    _markBitMap.mark(start + 1);      // object is potentially uninitialized?
    _markBitMap.mark(start + size - 1);
                                      // mark end of object
  }
  // check that oop looks uninitialized
  assert(oop(start)->klass_or_null() == NULL, "_klass should be NULL");
}
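
// Illustrative summary of the bits set above (positions are bit indices
// in _markBitMap relative to the block start):
//
//   start            : marked -- the object is live
//   start + 1        : marked -- "P-bit": the object may be uninitialized
//   start + size - 1 : marked -- end of block, so scans can skip over it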

void CMSCollector::promoted(bool par, HeapWord* start,
                            bool is_obj_array, size_t obj_size) {
  assert(_markBitMap.covers(start), "Out of bounds");
  // See comment in direct_allocated() about when objects should
  // be allocated live.
  if (_collectorState >= Marking) {
    // we already hold the marking bit map lock, taken in
    // the prologue
    if (par) {
      _markBitMap.par_mark(start);
    } else {
      _markBitMap.mark(start);
    }
    // We don't need to mark the object as uninitialized (as
    // in direct_allocated above) because this is being done with the
    // world stopped and the object will be initialized by the
    // time the marking, precleaning or sweeping get to look at it.
    // But see the code for copying objects into the CMS generation,
    // where we need to ensure that concurrent readers of the
    // block offset table are able to safely navigate a block that
    // is in flux from being free to being allocated (and in
    // transition while being copied into) and subsequently
    // becoming a bona-fide object when the copy/promotion is complete.
    assert(SafepointSynchronize::is_at_safepoint(),
           "expect promotion only at safepoints");

    if (_collectorState < Sweeping) {
      // Mark the appropriate cards in the modUnionTable, so that
      // this object gets scanned before the sweep. If this is
      // not done, CMS generation references in the object might
      // not get marked.
      // For the case of arrays, which are otherwise precisely
      // marked, we need to dirty the entire array, not just its head.
      if (is_obj_array) {
        // The [par_]mark_range() method expects mr.end() below to
        // be aligned to the granularity of a bit's representation
        // in the heap. In the case of the MUT below, that's a
        // card size.
        MemRegion mr(start,
                     (HeapWord*)round_to((intptr_t)(start + obj_size),
                        CardTableModRefBS::card_size /* bytes */));
        if (par) {
          _modUnionTable.par_mark_range(mr);
        } else {
          _modUnionTable.mark_range(mr);
        }
      } else {  // not an obj array; we can just mark the head
        if (par) {
          _modUnionTable.par_mark(start);
        } else {
          _modUnionTable.mark(start);
        }
      }
    }
  }
}
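
// Worked example (illustrative numbers only, and assuming start is
// card-aligned): with 512-byte cards (64 HeapWords), an object array of
// obj_size = 100 words is dirtied as mr = [start, start + 128 words),
// i.e. the covered region is rounded up to whole cards before
// [par_]mark_range() is applied.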

static inline size_t percent_of_space(Space* space, HeapWord* addr)
{
  size_t delta = pointer_delta(addr, space->bottom());
  return (size_t)(delta * 100.0 / (space->capacity() / HeapWordSize));
}

void CMSCollector::icms_update_allocation_limits()
{
  Generation* gen0 = GenCollectedHeap::heap()->get_gen(0);
  EdenSpace* eden = gen0->as_DefNewGeneration()->eden();

  const unsigned int duty_cycle = stats().icms_update_duty_cycle();
  if (CMSTraceIncrementalPacing) {
    stats().print();
  }

  assert(duty_cycle <= 100, "invalid duty cycle");
  if (duty_cycle != 0) {
    // The duty_cycle is a percentage between 0 and 100; convert to words and
    // then compute the offset from the endpoints of the space.
    size_t free_words = eden->free() / HeapWordSize;
    double free_words_dbl = (double)free_words;
    size_t duty_cycle_words = (size_t)(free_words_dbl * duty_cycle / 100.0);
    size_t offset_words = (free_words - duty_cycle_words) / 2;

    _icms_start_limit = eden->top() + offset_words;
    _icms_stop_limit = eden->end() - offset_words;

    // The limits may be adjusted (shifted to the right) by
    // CMSIncrementalOffset, to allow the application more mutator time after a
    // young gen gc (when all mutators were stopped) and before CMS starts and
    // takes away one or more cpus.
    if (CMSIncrementalOffset != 0) {
      double adjustment_dbl = free_words_dbl * CMSIncrementalOffset / 100.0;
      size_t adjustment = (size_t)adjustment_dbl;
      HeapWord* tmp_stop = _icms_stop_limit + adjustment;
      if (tmp_stop > _icms_stop_limit && tmp_stop < eden->end()) {
        _icms_start_limit += adjustment;
        _icms_stop_limit = tmp_stop;
      }
    }
  }
  if (duty_cycle == 0 || (_icms_start_limit == _icms_stop_limit)) {
    _icms_start_limit = _icms_stop_limit = eden->end();
  }

  // Install the new start limit.
  eden->set_soft_end(_icms_start_limit);

  if (CMSTraceIncrementalMode) {
    gclog_or_tty->print(" icms alloc limits:  "
                           PTR_FORMAT "," PTR_FORMAT
                           " (" SIZE_FORMAT "%%," SIZE_FORMAT "%%) ",
                           _icms_start_limit, _icms_stop_limit,
                           percent_of_space(eden, _icms_start_limit),
                           percent_of_space(eden, _icms_stop_limit));
    if (Verbose) {
      gclog_or_tty->print("eden:  ");
      eden->print_on(gclog_or_tty);
    }
  }
}
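
// Worked example (illustrative numbers only): with eden->free() = 100M
// words and duty_cycle = 20, duty_cycle_words = 20M and offset_words =
// (100M - 20M) / 2 = 40M, so icms runs in the window
//   [top + 40M, end - 40M)
// before any CMSIncrementalOffset adjustment shifts it to the right.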

// Any changes here should try to maintain the invariant
// that if this method is called with _icms_start_limit
// and _icms_stop_limit both NULL, then it should return NULL
// and not notify the icms thread.
HeapWord*
CMSCollector::allocation_limit_reached(Space* space, HeapWord* top,
                                       size_t word_size)
{
  // A start_limit equal to end() means the duty cycle is 0, so treat that as a
  // nop.
  if (CMSIncrementalMode && _icms_start_limit != space->end()) {
    if (top <= _icms_start_limit) {
      if (CMSTraceIncrementalMode) {
        space->print_on(gclog_or_tty);
        gclog_or_tty->stamp();
        gclog_or_tty->print_cr(" start limit top=" PTR_FORMAT
                               ", new limit=" PTR_FORMAT
                               " (" SIZE_FORMAT "%%)",
                               top, _icms_stop_limit,
                               percent_of_space(space, _icms_stop_limit));
      }
      ConcurrentMarkSweepThread::start_icms();
      assert(top < _icms_stop_limit, "Tautology");
      if (word_size < pointer_delta(_icms_stop_limit, top)) {
        return _icms_stop_limit;
      }

      // The allocation will cross both the _start and _stop limits, so do the
      // stop notification also and return end().
      if (CMSTraceIncrementalMode) {
        space->print_on(gclog_or_tty);
        gclog_or_tty->stamp();
        gclog_or_tty->print_cr(" +stop limit top=" PTR_FORMAT
                               ", new limit=" PTR_FORMAT
                               " (" SIZE_FORMAT "%%)",
                               top, space->end(),
                               percent_of_space(space, space->end()));
      }
      ConcurrentMarkSweepThread::stop_icms();
      return space->end();
    }

    if (top <= _icms_stop_limit) {
      if (CMSTraceIncrementalMode) {
        space->print_on(gclog_or_tty);
        gclog_or_tty->stamp();
        gclog_or_tty->print_cr(" stop limit top=" PTR_FORMAT
                               ", new limit=" PTR_FORMAT
                               " (" SIZE_FORMAT "%%)",
                               top, space->end(),
                               percent_of_space(space, space->end()));
      }
      ConcurrentMarkSweepThread::stop_icms();
      return space->end();
    }

    if (CMSTraceIncrementalMode) {
      space->print_on(gclog_or_tty);
      gclog_or_tty->stamp();
      gclog_or_tty->print_cr(" end limit top=" PTR_FORMAT
                             ", new limit=" PTR_FORMAT,
                             top, NULL);
    }
  }

  return NULL;
}

oop ConcurrentMarkSweepGeneration::promote(oop obj, size_t obj_size) {
  assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
  // allocate, copy and if necessary update promoinfo --
  // delegate to underlying space.
  assert_lock_strong(freelistLock());

#ifndef PRODUCT
  if (Universe::heap()->promotion_should_fail()) {
    return NULL;
  }
#endif  // #ifndef PRODUCT

  oop res = _cmsSpace->promote(obj, obj_size);
  if (res == NULL) {
    // expand and retry
    size_t s = _cmsSpace->expansionSpaceRequired(obj_size);  // HeapWords
    expand(s*HeapWordSize, MinHeapDeltaBytes,
      CMSExpansionCause::_satisfy_promotion);
    // Since there's currently no next generation, we don't try to promote
    // into a more senior generation.
    assert(next_gen() == NULL, "assumption, based upon which no attempt "
                               "is made to pass on a possibly failing "
                               "promotion to next generation");
    res = _cmsSpace->promote(obj, obj_size);
  }
  if (res != NULL) {
    // See comment in allocate() about when objects should
    // be allocated live.
    assert(obj->is_oop(), "Will dereference klass pointer below");
    collector()->promoted(false,           // Not parallel
                          (HeapWord*)res, obj->is_objArray(), obj_size);
    // promotion counters
    NOT_PRODUCT(
      _numObjectsPromoted++;
      _numWordsPromoted +=
        (int)(CompactibleFreeListSpace::adjustObjectSize(obj->size()));
    )
  }
  return res;
}


HeapWord*
ConcurrentMarkSweepGeneration::allocation_limit_reached(Space* space,
                                             HeapWord* top,
                                             size_t word_sz)
{
  return collector()->allocation_limit_reached(space, top, word_sz);
}

// IMPORTANT: Notes on object size recognition in CMS.
// ---------------------------------------------------
// A block of storage in the CMS generation is always in
// one of three states. A free block (FREE), an allocated
// object (OBJECT) whose size() method reports the correct size,
// and an intermediate state (TRANSIENT) in which its size cannot
// be accurately determined.
// STATE IDENTIFICATION:   (32 bit and 64 bit w/o COOPS)
// -----------------------------------------------------
// FREE:      klass_word & 1 == 1; mark_word holds block size
//
// OBJECT:    klass_word installed; klass_word != 0 && klass_word & 1 == 0;
//            obj->size() computes correct size
//
// TRANSIENT: klass_word == 0; size is indeterminate until we become an OBJECT
//
// STATE IDENTIFICATION: (64 bit+COOPS)
// ------------------------------------
// FREE:      mark_word & CMS_FREE_BIT == 1; mark_word & ~CMS_FREE_BIT gives block_size
//
// OBJECT:    klass_word installed; klass_word != 0;
//            obj->size() computes correct size
//
// TRANSIENT: klass_word == 0; size is indeterminate until we become an OBJECT
//
//
// STATE TRANSITION DIAGRAM
//
//        mut / parnew                     mut  /  parnew
// FREE --------------------> TRANSIENT ---------------------> OBJECT --|
//  ^                                                                   |
//  |------------------------ DEAD <------------------------------------|
//         sweep                            mut
//
// While a block is in TRANSIENT state its size cannot be determined
// so readers will either need to come back later or stall until
// the size can be determined. Note that for the case of direct
1356 // allocation, P-bits, when available, may be used to determine the
1357 // size of an object that may not yet have been initialized.
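
// For illustration only -- a minimal sketch, under the 32-bit / 64-bit
// without-COOPS encoding above, of how a reader might classify a block.
// The names BlockState and raw_klass_word() are hypothetical stand-ins;
// real readers use the block-parsing support in CompactibleFreeListSpace
// (and P-bits, where available) rather than a helper like this:
//
//   enum BlockState { FREE, OBJECT, TRANSIENT };
//   BlockState classify_block(HeapWord* addr) {
//     intptr_t k = raw_klass_word(addr);  // hypothetical raw accessor
//     if (k & 1)  return FREE;       // low bit set: free chunk; mark word
//                                    // holds the block size
//     if (k == 0) return TRANSIENT;  // header not yet installed; size is
//                                    // indeterminate -- retry or stall
//     return OBJECT;                 // klass installed; obj->size() is valid
//   }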
1358 
1359 // Things to support parallel young-gen collection.
1360 oop
1361 ConcurrentMarkSweepGeneration::par_promote(int thread_num,
1362                                            oop old, markOop m,
1363                                            size_t word_sz) {
1364 #ifndef PRODUCT
1365   if (Universe::heap()->promotion_should_fail()) {
1366     return NULL;
1367   }
1368 #endif  // #ifndef PRODUCT
1369 
1370   CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
1371   PromotionInfo* promoInfo = &ps->promo;
1372   // if we are tracking promotions, then first ensure space for
1373   // promotion (including spooling space for saving header if necessary).
1374   // then allocate and copy, then track promoted info if needed.
1375   // When tracking (see PromotionInfo::track()), the mark word may
1376   // be displaced and in this case restoration of the mark word
1377   // occurs in the (oop_since_save_marks_)iterate phase.
1378   if (promoInfo->tracking() && !promoInfo->ensure_spooling_space()) {
1379     // Out of space for allocating spooling buffers;
1380     // try expanding and allocating spooling buffers.
1381     if (!expand_and_ensure_spooling_space(promoInfo)) {
1382       return NULL;
1383     }
1384   }
1385   assert(promoInfo->has_spooling_space(), "Control point invariant");
1386   const size_t alloc_sz = CompactibleFreeListSpace::adjustObjectSize(word_sz);
1387   HeapWord* obj_ptr = ps->lab.alloc(alloc_sz);
1388   if (obj_ptr == NULL) {
1389      obj_ptr = expand_and_par_lab_allocate(ps, alloc_sz);
1390      if (obj_ptr == NULL) {
1391        return NULL;
1392      }
1393   }
1394   oop obj = oop(obj_ptr);
1395   OrderAccess::storestore();
1396   assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
1397   assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
1398   // IMPORTANT: See note on object initialization for CMS above.
  // Otherwise, copy the object.  Here we must be careful to insert the
  // klass pointer last, since this marks the block as an allocated object.
  // (With compressed oops it is the mark word, rather than the klass word,
  // that plays this role; see the state identification notes above.)
1402   HeapWord* old_ptr = (HeapWord*)old;
1403   // Restore the mark word copied above.
1404   obj->set_mark(m);
1405   assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
1406   assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
1407   OrderAccess::storestore();
1408 
1409   if (UseCompressedKlassPointers) {
1410     // Copy gap missed by (aligned) header size calculation below
1411     obj->set_klass_gap(old->klass_gap());
1412   }
1413   if (word_sz > (size_t)oopDesc::header_size()) {
1414     Copy::aligned_disjoint_words(old_ptr + oopDesc::header_size(),
1415                                  obj_ptr + oopDesc::header_size(),
1416                                  word_sz - oopDesc::header_size());
1417   }
1418 
1419   // Now we can track the promoted object, if necessary.  We take care
1420   // to delay the transition from uninitialized to full object
1421   // (i.e., insertion of klass pointer) until after, so that it
1422   // atomically becomes a promoted object.
1423   if (promoInfo->tracking()) {
1424     promoInfo->track((PromotedObject*)obj, old->klass());
1425   }
1426   assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
1427   assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
1428   assert(old->is_oop(), "Will use and dereference old klass ptr below");
1429 
1430   // Finally, install the klass pointer (this should be volatile).
1431   OrderAccess::storestore();
1432   obj->set_klass(old->klass());
1433   // We should now be able to calculate the right size for this object
1434   assert(obj->is_oop() && obj->size() == (int)word_sz, "Error, incorrect size computed for promoted object");
1435 
1436   collector()->promoted(true,          // parallel
1437                         obj_ptr, old->is_objArray(), word_sz);
1438 
1439   NOT_PRODUCT(
1440     Atomic::inc_ptr(&_numObjectsPromoted);
1441     Atomic::add_ptr(alloc_sz, &_numWordsPromoted);
1442   )
1443 
1444   return obj;
1445 }
1446 
1447 void
1448 ConcurrentMarkSweepGeneration::
1449 par_promote_alloc_undo(int thread_num,
1450                        HeapWord* obj, size_t word_sz) {
1451   // CMS does not support promotion undo.
1452   ShouldNotReachHere();
1453 }
1454 
1455 void
1456 ConcurrentMarkSweepGeneration::
1457 par_promote_alloc_done(int thread_num) {
1458   CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
1459   ps->lab.retire(thread_num);
1460 }
1461 
1462 void
1463 ConcurrentMarkSweepGeneration::
1464 par_oop_since_save_marks_iterate_done(int thread_num) {
1465   CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
1466   ParScanWithoutBarrierClosure* dummy_cl = NULL;
1467   ps->promo.promoted_oops_iterate_nv(dummy_cl);
1468 }
1469 
1470 bool ConcurrentMarkSweepGeneration::should_collect(bool   full,
1471                                                    size_t size,
1472                                                    bool   tlab)
1473 {
1474   // We allow a STW collection only if a full
1475   // collection was requested.
1476   return full || should_allocate(size, tlab); // FIX ME !!!
1477   // This and promotion failure handling are connected at the
1478   // hip and should be fixed by untying them.
1479 }
1480 
1481 bool CMSCollector::shouldConcurrentCollect() {
1482   if (_full_gc_requested) {
1483     if (Verbose && PrintGCDetails) {
1484       gclog_or_tty->print_cr("CMSCollector: collect because of explicit "
1485                              " gc request (or gc_locker)");
1486     }
1487     return true;
1488   }
1489 
  // For debugging purposes, the collection type may be rotated.
  // If the rotation is not currently on the concurrent collection
  // type, don't start a concurrent collection.
1493   NOT_PRODUCT(
1494     if (RotateCMSCollectionTypes &&
1495         (_cmsGen->debug_collection_type() !=
1496           ConcurrentMarkSweepGeneration::Concurrent_collection_type)) {
1497       assert(_cmsGen->debug_collection_type() !=
1498         ConcurrentMarkSweepGeneration::Unknown_collection_type,
1499         "Bad cms collection type");
1500       return false;
1501     }
1502   )
1503 
1504   FreelistLocker x(this);
1505   // ------------------------------------------------------------------
1506   // Print out lots of information which affects the initiation of
1507   // a collection.
1508   if (PrintCMSInitiationStatistics && stats().valid()) {
1509     gclog_or_tty->print("CMSCollector shouldConcurrentCollect: ");
1510     gclog_or_tty->stamp();
1511     gclog_or_tty->print_cr("");
1512     stats().print_on(gclog_or_tty);
1513     gclog_or_tty->print_cr("time_until_cms_gen_full %3.7f",
1514       stats().time_until_cms_gen_full());
1515     gclog_or_tty->print_cr("free="SIZE_FORMAT, _cmsGen->free());
1516     gclog_or_tty->print_cr("contiguous_available="SIZE_FORMAT,
1517                            _cmsGen->contiguous_available());
1518     gclog_or_tty->print_cr("promotion_rate=%g", stats().promotion_rate());
1519     gclog_or_tty->print_cr("cms_allocation_rate=%g", stats().cms_allocation_rate());
1520     gclog_or_tty->print_cr("occupancy=%3.7f", _cmsGen->occupancy());
1521     gclog_or_tty->print_cr("initiatingOccupancy=%3.7f", _cmsGen->initiating_occupancy());
1522     gclog_or_tty->print_cr("metadata initialized %d",
1523       MetaspaceGC::should_concurrent_collect());
1524   }
1525   // ------------------------------------------------------------------
1526 
1527   // If the estimated time to complete a cms collection (cms_duration())
1528   // is less than the estimated time remaining until the cms generation
1529   // is full, start a collection.
1530   if (!UseCMSInitiatingOccupancyOnly) {
1531     if (stats().valid()) {
1532       if (stats().time_until_cms_start() == 0.0) {
1533         return true;
1534       }
1535     } else {
1536       // We want to conservatively collect somewhat early in order
1537       // to try and "bootstrap" our CMS/promotion statistics;
1538       // this branch will not fire after the first successful CMS
1539       // collection because the stats should then be valid.
1540       if (_cmsGen->occupancy() >= _bootstrap_occupancy) {
1541         if (Verbose && PrintGCDetails) {
1542           gclog_or_tty->print_cr(
1543             " CMSCollector: collect for bootstrapping statistics:"
1544             " occupancy = %f, boot occupancy = %f", _cmsGen->occupancy(),
1545             _bootstrap_occupancy);
1546         }
1547         return true;
1548       }
1549     }
1550   }
1551 
  // Otherwise, we start a collection cycle if the
  // old gen wants a collection cycle started. It may use
  // an appropriate criterion for making this decision.
1555   // XXX We need to make sure that the gen expansion
1556   // criterion dovetails well with this. XXX NEED TO FIX THIS
1557   if (_cmsGen->should_concurrent_collect()) {
1558     if (Verbose && PrintGCDetails) {
1559       gclog_or_tty->print_cr("CMS old gen initiated");
1560     }
1561     return true;
1562   }
1563 
1564   // We start a collection if we believe an incremental collection may fail;
1565   // this is not likely to be productive in practice because it's probably too
1566   // late anyway.
1567   GenCollectedHeap* gch = GenCollectedHeap::heap();
1568   assert(gch->collector_policy()->is_two_generation_policy(),
1569          "You may want to check the correctness of the following");
1570   if (gch->incremental_collection_will_fail(true /* consult_young */)) {
1571     if (Verbose && PrintGCDetails) {
1572       gclog_or_tty->print("CMSCollector: collect because incremental collection will fail ");
1573     }
1574     return true;
1575   }
1576 
  if (MetaspaceGC::should_concurrent_collect()) {
    if (Verbose && PrintGCDetails) {
      gclog_or_tty->print("CMSCollector: collect for metadata allocation ");
    }
    return true;
  }
1583 
1584   return false;
1585 }
1586 
1587 void CMSCollector::set_did_compact(bool v) { _cmsGen->set_did_compact(v); }
1588 
1589 // Clear _expansion_cause fields of constituent generations
1590 void CMSCollector::clear_expansion_cause() {
1591   _cmsGen->clear_expansion_cause();
1592 }
1593 
1594 // We should be conservative in starting a collection cycle.  To
1595 // start too eagerly runs the risk of collecting too often in the
1596 // extreme.  To collect too rarely falls back on full collections,
1597 // which works, even if not optimum in terms of concurrent work.
// As a workaround for collecting too eagerly, use the flag
1599 // UseCMSInitiatingOccupancyOnly.  This also has the advantage of
1600 // giving the user an easily understandable way of controlling the
1601 // collections.
1602 // We want to start a new collection cycle if any of the following
1603 // conditions hold:
1604 // . our current occupancy exceeds the configured initiating occupancy
1605 //   for this generation, or
1606 // . we recently needed to expand this space and have not, since that
1607 //   expansion, done a collection of this generation, or
1608 // . the underlying space believes that it may be a good idea to initiate
1609 //   a concurrent collection (this may be based on criteria such as the
1610 //   following: the space uses linear allocation and linear allocation is
1611 //   going to fail, or there is believed to be excessive fragmentation in
1612 //   the generation, etc... or ...
1613 // [.(currently done by CMSCollector::shouldConcurrentCollect() only for
1614 //   the case of the old generation; see CR 6543076):
1615 //   we may be approaching a point at which allocation requests may fail because
1616 //   we will be out of sufficient free space given allocation rate estimates.]
1617 bool ConcurrentMarkSweepGeneration::should_concurrent_collect() const {
1618 
1619   assert_lock_strong(freelistLock());
1620   if (occupancy() > initiating_occupancy()) {
1621     if (PrintGCDetails && Verbose) {
1622       gclog_or_tty->print(" %s: collect because of occupancy %f / %f  ",
1623         short_name(), occupancy(), initiating_occupancy());
1624     }
1625     return true;
1626   }
1627   if (UseCMSInitiatingOccupancyOnly) {
1628     return false;
1629   }
1630   if (expansion_cause() == CMSExpansionCause::_satisfy_allocation) {
1631     if (PrintGCDetails && Verbose) {
1632       gclog_or_tty->print(" %s: collect because expanded for allocation ",
1633         short_name());
1634     }
1635     return true;
1636   }
1637   if (_cmsSpace->should_concurrent_collect()) {
1638     if (PrintGCDetails && Verbose) {
1639       gclog_or_tty->print(" %s: collect because cmsSpace says so ",
1640         short_name());
1641     }
1642     return true;
1643   }
1644   return false;
1645 }
1646 
1647 void ConcurrentMarkSweepGeneration::collect(bool   full,
1648                                             bool   clear_all_soft_refs,
1649                                             size_t size,
1650                                             bool   tlab)
1651 {
1652   collector()->collect(full, clear_all_soft_refs, size, tlab);
1653 }
1654 
1655 void CMSCollector::collect(bool   full,
1656                            bool   clear_all_soft_refs,
1657                            size_t size,
1658                            bool   tlab)
1659 {
1660   if (!UseCMSCollectionPassing && _collectorState > Idling) {
1661     // For debugging purposes skip the collection if the state
1662     // is not currently idle
1663     if (TraceCMSState) {
1664       gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " skipped full:%d CMS state %d",
1665         Thread::current(), full, _collectorState);
1666     }
1667     return;
1668   }
1669 
1670   // The following "if" branch is present for defensive reasons.
1671   // In the current uses of this interface, it can be replaced with:
  // assert(!GC_locker::is_active(), "Can't be called otherwise");
1673   // But I am not placing that assert here to allow future
1674   // generality in invoking this interface.
1675   if (GC_locker::is_active()) {
1676     // A consistency test for GC_locker
1677     assert(GC_locker::needs_gc(), "Should have been set already");
1678     // Skip this foreground collection, instead
1679     // expanding the heap if necessary.
1680     // Need the free list locks for the call to free() in compute_new_size()
1681     compute_new_size();
1682     return;
1683   }
1684   acquire_control_and_collect(full, clear_all_soft_refs);
1685   _full_gcs_since_conc_gc++;
1686 }
1687 
1688 void CMSCollector::request_full_gc(unsigned int full_gc_count, GCCause::Cause cause) {
1689   GenCollectedHeap* gch = GenCollectedHeap::heap();
1690   unsigned int gc_count = gch->total_full_collections();
1691   if (gc_count == full_gc_count) {
1692     MutexLockerEx y(CGC_lock, Mutex::_no_safepoint_check_flag);
1693     _full_gc_requested = true;
1694     _full_gc_cause = cause;
1695     CGC_lock->notify();   // nudge CMS thread
1696   } else {
1697     assert(gc_count > full_gc_count, "Error: causal loop");
1698   }
1699 }
1700 
1701 bool CMSCollector::is_external_interruption() {
1702   GCCause::Cause cause = GenCollectedHeap::heap()->gc_cause();
1703   return GCCause::is_user_requested_gc(cause) ||
1704          GCCause::is_serviceability_requested_gc(cause);
1705 }
1706 
1707 void CMSCollector::report_concurrent_mode_interruption() {
1708   if (is_external_interruption()) {
1709     if (PrintGCDetails) {
1710       gclog_or_tty->print(" (concurrent mode interrupted)");
1711     }
1712   } else {
1713     if (PrintGCDetails) {
1714       gclog_or_tty->print(" (concurrent mode failure)");
1715     }
1716     _gc_tracer_cm->report_concurrent_mode_failure();
1717   }
1718 }
1719 
1720 
1721 // The foreground and background collectors need to coordinate in order
1722 // to make sure that they do not mutually interfere with CMS collections.
1723 // When a background collection is active,
1724 // the foreground collector may need to take over (preempt) and
1725 // synchronously complete an ongoing collection. Depending on the
1726 // frequency of the background collections and the heap usage
1727 // of the application, this preemption can be seldom or frequent.
1728 // There are only certain
1729 // points in the background collection that the "collection-baton"
1730 // can be passed to the foreground collector.
1731 //
1732 // The foreground collector will wait for the baton before
1733 // starting any part of the collection.  The foreground collector
1734 // will only wait at one location.
1735 //
1736 // The background collector will yield the baton before starting a new
1737 // phase of the collection (e.g., before initial marking, marking from roots,
1738 // precleaning, final re-mark, sweep etc.)  This is normally done at the head
1739 // of the loop which switches the phases. The background collector does some
1740 // of the phases (initial mark, final re-mark) with the world stopped.
1741 // Because of locking involved in stopping the world,
1742 // the foreground collector should not block waiting for the background
1743 // collector when it is doing a stop-the-world phase.  The background
1744 // collector will yield the baton at an additional point just before
1745 // it enters a stop-the-world phase.  Once the world is stopped, the
1746 // background collector checks the phase of the collection.  If the
1747 // phase has not changed, it proceeds with the collection.  If the
1748 // phase has changed, it skips that phase of the collection.  See
1749 // the comments on the use of the Heap_lock in collect_in_background().
1750 //
1751 // Variable used in baton passing.
1752 //   _foregroundGCIsActive - Set to true by the foreground collector when
1753 //      it wants the baton.  The foreground clears it when it has finished
1754 //      the collection.
//   _foregroundGCShouldWait - Set to true by the background collector
//      when it is running.  The foreground collector waits while
//      _foregroundGCShouldWait is true.
1758 //  CGC_lock - monitor used to protect access to the above variables
1759 //      and to notify the foreground and background collectors.
1760 //  _collectorState - current state of the CMS collection.
1761 //
1762 // The foreground collector
1763 //   acquires the CGC_lock
1764 //   sets _foregroundGCIsActive
1765 //   waits on the CGC_lock for _foregroundGCShouldWait to be false
1766 //     various locks acquired in preparation for the collection
1767 //     are released so as not to block the background collector
1768 //     that is in the midst of a collection
1769 //   proceeds with the collection
1770 //   clears _foregroundGCIsActive
1771 //   returns
1772 //
1773 // The background collector in a loop iterating on the phases of the
1774 //      collection
1775 //   acquires the CGC_lock
1776 //   sets _foregroundGCShouldWait
1777 //   if _foregroundGCIsActive is set
//     clears _foregroundGCShouldWait, notifies CGC_lock
//     waits on CGC_lock for _foregroundGCIsActive to become false
1780 //     and exits the loop.
1781 //   otherwise
1782 //     proceed with that phase of the collection
1783 //     if the phase is a stop-the-world phase,
1784 //       yield the baton once more just before enqueueing
1785 //       the stop-world CMS operation (executed by the VM thread).
1786 //   returns after all phases of the collection are done
1787 //
1788 
1789 void CMSCollector::acquire_control_and_collect(bool full,
1790         bool clear_all_soft_refs) {
1791   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
1792   assert(!Thread::current()->is_ConcurrentGC_thread(),
1793          "shouldn't try to acquire control from self!");
1794 
1795   // Start the protocol for acquiring control of the
1796   // collection from the background collector (aka CMS thread).
1797   assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
1798          "VM thread should have CMS token");
1799   // Remember the possibly interrupted state of an ongoing
1800   // concurrent collection
1801   CollectorState first_state = _collectorState;
1802 
1803   // Signal to a possibly ongoing concurrent collection that
1804   // we want to do a foreground collection.
1805   _foregroundGCIsActive = true;
1806 
1807   // Disable incremental mode during a foreground collection.
1808   ICMSDisabler icms_disabler;
1809 
  // Release locks and wait for a notify from the background collector;
  // releasing the locks is only necessary for phases which
  // yield to improve the granularity of the collection.
1813   assert_lock_strong(bitMapLock());
1814   // We need to lock the Free list lock for the space that we are
1815   // currently collecting.
1816   assert(haveFreelistLocks(), "Must be holding free list locks");
1817   bitMapLock()->unlock();
1818   releaseFreelistLocks();
1819   {
1820     MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
1821     if (_foregroundGCShouldWait) {
1822       // We are going to be waiting for action for the CMS thread;
1823       // it had better not be gone (for instance at shutdown)!
1824       assert(ConcurrentMarkSweepThread::cmst() != NULL,
1825              "CMS thread must be running");
1826       // Wait here until the background collector gives us the go-ahead
1827       ConcurrentMarkSweepThread::clear_CMS_flag(
1828         ConcurrentMarkSweepThread::CMS_vm_has_token);  // release token
1829       // Get a possibly blocked CMS thread going:
1830       //   Note that we set _foregroundGCIsActive true above,
1831       //   without protection of the CGC_lock.
1832       CGC_lock->notify();
1833       assert(!ConcurrentMarkSweepThread::vm_thread_wants_cms_token(),
1834              "Possible deadlock");
1835       while (_foregroundGCShouldWait) {
1836         // wait for notification
1837         CGC_lock->wait(Mutex::_no_safepoint_check_flag);
1838         // Possibility of delay/starvation here, since CMS token does
        // not know to give priority to VM thread? Actually, I think
1840         // there wouldn't be any delay/starvation, but the proof of
1841         // that "fact" (?) appears non-trivial. XXX 20011219YSR
1842       }
1843       ConcurrentMarkSweepThread::set_CMS_flag(
1844         ConcurrentMarkSweepThread::CMS_vm_has_token);
1845     }
1846   }
1847   // The CMS_token is already held.  Get back the other locks.
1848   assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
1849          "VM thread should have CMS token");
1850   getFreelistLocks();
1851   bitMapLock()->lock_without_safepoint_check();
1852   if (TraceCMSState) {
1853     gclog_or_tty->print_cr("CMS foreground collector has asked for control "
1854       INTPTR_FORMAT " with first state %d", Thread::current(), first_state);
1855     gclog_or_tty->print_cr("    gets control with state %d", _collectorState);
1856   }
1857 
1858   // Check if we need to do a compaction, or if not, whether
1859   // we need to start the mark-sweep from scratch.
1860   bool should_compact    = false;
1861   bool should_start_over = false;
1862   decide_foreground_collection_type(clear_all_soft_refs,
1863     &should_compact, &should_start_over);
1864 
  NOT_PRODUCT(
    if (RotateCMSCollectionTypes) {
      if (_cmsGen->debug_collection_type() ==
          ConcurrentMarkSweepGeneration::MSC_foreground_collection_type) {
        should_compact = true;
      } else if (_cmsGen->debug_collection_type() ==
                 ConcurrentMarkSweepGeneration::MS_foreground_collection_type) {
        should_compact = false;
      }
    }
  )
1876 
1877   if (first_state > Idling) {
1878     report_concurrent_mode_interruption();
1879   }
1880 
1881   set_did_compact(should_compact);
1882   if (should_compact) {
1883     // If the collection is being acquired from the background
1884     // collector, there may be references on the discovered
1885     // references lists that have NULL referents (being those
1886     // that were concurrently cleared by a mutator) or
1887     // that are no longer active (having been enqueued concurrently
1888     // by the mutator).
1889     // Scrub the list of those references because Mark-Sweep-Compact
1890     // code assumes referents are not NULL and that all discovered
1891     // Reference objects are active.
1892     ref_processor()->clean_up_discovered_references();
1893 
1894     if (first_state > Idling) {
1895       save_heap_summary();
1896     }
1897 
1898     do_compaction_work(clear_all_soft_refs);
1899 
1900     // Has the GC time limit been exceeded?
1901     DefNewGeneration* young_gen = _young_gen->as_DefNewGeneration();
1902     size_t max_eden_size = young_gen->max_capacity() -
1903                            young_gen->to()->capacity() -
1904                            young_gen->from()->capacity();
1905     GenCollectedHeap* gch = GenCollectedHeap::heap();
1906     GCCause::Cause gc_cause = gch->gc_cause();
1907     size_policy()->check_gc_overhead_limit(_young_gen->used(),
1908                                            young_gen->eden()->used(),
1909                                            _cmsGen->max_capacity(),
1910                                            max_eden_size,
1911                                            full,
1912                                            gc_cause,
1913                                            gch->collector_policy());
1914   } else {
1915     do_mark_sweep_work(clear_all_soft_refs, first_state,
1916       should_start_over);
1917   }
1918   // Reset the expansion cause, now that we just completed
1919   // a collection cycle.
1920   clear_expansion_cause();
1921   _foregroundGCIsActive = false;
1922   return;
1923 }
1924 
1925 // Resize the tenured generation
1926 // after obtaining the free list locks for the
1927 // two generations.
1928 void CMSCollector::compute_new_size() {
1929   assert_locked_or_safepoint(Heap_lock);
1930   FreelistLocker z(this);
1931   MetaspaceGC::compute_new_size();
1932   _cmsGen->compute_new_size_free_list();
1933 }
1934 
1935 // A work method used by foreground collection to determine
1936 // what type of collection (compacting or not, continuing or fresh)
1937 // it should do.
1938 // NOTE: the intent is to make UseCMSCompactAtFullCollection
1939 // and CMSCompactWhenClearAllSoftRefs the default in the future
1940 // and do away with the flags after a suitable period.
1941 void CMSCollector::decide_foreground_collection_type(
1942   bool clear_all_soft_refs, bool* should_compact,
1943   bool* should_start_over) {
1944   // Normally, we'll compact only if the UseCMSCompactAtFullCollection
1945   // flag is set, and we have either requested a System.gc() or
1946   // the number of full gc's since the last concurrent cycle
1947   // has exceeded the threshold set by CMSFullGCsBeforeCompaction,
1948   // or if an incremental collection has failed
1949   GenCollectedHeap* gch = GenCollectedHeap::heap();
1950   assert(gch->collector_policy()->is_two_generation_policy(),
1951          "You may want to check the correctness of the following");
1952   // Inform cms gen if this was due to partial collection failing.
1953   // The CMS gen may use this fact to determine its expansion policy.
1954   if (gch->incremental_collection_will_fail(false /* don't consult_young */)) {
1955     assert(!_cmsGen->incremental_collection_failed(),
1956            "Should have been noticed, reacted to and cleared");
1957     _cmsGen->set_incremental_collection_failed();
1958   }
1959   *should_compact =
1960     UseCMSCompactAtFullCollection &&
1961     ((_full_gcs_since_conc_gc >= CMSFullGCsBeforeCompaction) ||
1962      GCCause::is_user_requested_gc(gch->gc_cause()) ||
1963      gch->incremental_collection_will_fail(true /* consult_young */));
1964   *should_start_over = false;
1965   if (clear_all_soft_refs && !*should_compact) {
1966     // We are about to do a last ditch collection attempt
1967     // so it would normally make sense to do a compaction
1968     // to reclaim as much space as possible.
1969     if (CMSCompactWhenClearAllSoftRefs) {
1970       // Default: The rationale is that in this case either
1971       // we are past the final marking phase, in which case
1972       // we'd have to start over, or so little has been done
1973       // that there's little point in saving that work. Compaction
1974       // appears to be the sensible choice in either case.
1975       *should_compact = true;
1976     } else {
1977       // We have been asked to clear all soft refs, but not to
1978       // compact. Make sure that we aren't past the final checkpoint
1979       // phase, for that is where we process soft refs. If we are already
1980       // past that phase, we'll need to redo the refs discovery phase and
1981       // if necessary clear soft refs that weren't previously
1982       // cleared. We do so by remembering the phase in which
1983       // we came in, and if we are past the refs processing
1984       // phase, we'll choose to just redo the mark-sweep
1985       // collection from scratch.
1986       if (_collectorState > FinalMarking) {
1987         // We are past the refs processing phase;
1988         // start over and do a fresh synchronous CMS cycle
1989         _collectorState = Resetting; // skip to reset to start new cycle
1990         reset(false /* == !asynch */);
1991         *should_start_over = true;
1992       } // else we can continue a possibly ongoing current cycle
1993     }
1994   }
1995 }
1996 
1997 // A work method used by the foreground collector to do
1998 // a mark-sweep-compact.
1999 void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
2000   GenCollectedHeap* gch = GenCollectedHeap::heap();
2001 
2002   STWGCTimer* gc_timer = GenMarkSweep::gc_timer();
2003   gc_timer->register_gc_start(os::elapsed_counter());
2004 
2005   SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer();
2006   gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start());
2007 
2008   GCTraceTime t("CMS:MSC ", PrintGCDetails && Verbose, true, NULL);
2009   if (PrintGC && Verbose && !(GCCause::is_user_requested_gc(gch->gc_cause()))) {
2010     gclog_or_tty->print_cr("Compact ConcurrentMarkSweepGeneration after %d "
2011       "collections passed to foreground collector", _full_gcs_since_conc_gc);
2012   }
2013 
2014   // Sample collection interval time and reset for collection pause.
2015   if (UseAdaptiveSizePolicy) {
2016     size_policy()->msc_collection_begin();
2017   }
2018 
2019   // Temporarily widen the span of the weak reference processing to
2020   // the entire heap.
2021   MemRegion new_span(GenCollectedHeap::heap()->reserved_region());
2022   ReferenceProcessorSpanMutator rp_mut_span(ref_processor(), new_span);
2023   // Temporarily, clear the "is_alive_non_header" field of the
2024   // reference processor.
2025   ReferenceProcessorIsAliveMutator rp_mut_closure(ref_processor(), NULL);
2026   // Temporarily make reference _processing_ single threaded (non-MT).
2027   ReferenceProcessorMTProcMutator rp_mut_mt_processing(ref_processor(), false);
2028   // Temporarily make refs discovery atomic
2029   ReferenceProcessorAtomicMutator rp_mut_atomic(ref_processor(), true);
2030   // Temporarily make reference _discovery_ single threaded (non-MT)
2031   ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false);
2032 
2033   ref_processor()->set_enqueuing_is_done(false);
2034   ref_processor()->enable_discovery(false /*verify_disabled*/, false /*check_no_refs*/);
2035   ref_processor()->setup_policy(clear_all_soft_refs);
  // If an asynchronous collection finishes, the _modUnionTable is
  // all clear.  If we are taking over the collection from an in-progress
  // asynchronous collection, clear the _modUnionTable.
2039   assert(_collectorState != Idling || _modUnionTable.isAllClear(),
2040     "_modUnionTable should be clear if the baton was not passed");
2041   _modUnionTable.clear_all();
2042   assert(_collectorState != Idling || _ct->klass_rem_set()->mod_union_is_clear(),
2043     "mod union for klasses should be clear if the baton was passed");
2044   _ct->klass_rem_set()->clear_mod_union();
2045 
2046   // We must adjust the allocation statistics being maintained
2047   // in the free list space. We do so by reading and clearing
2048   // the sweep timer and updating the block flux rate estimates below.
2049   assert(!_intra_sweep_timer.is_active(), "_intra_sweep_timer should be inactive");
2050   if (_inter_sweep_timer.is_active()) {
2051     _inter_sweep_timer.stop();
2052     // Note that we do not use this sample to update the _inter_sweep_estimate.
2053     _cmsGen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()),
2054                                             _inter_sweep_estimate.padded_average(),
2055                                             _intra_sweep_estimate.padded_average());
2056   }
2057 
2058   GenMarkSweep::invoke_at_safepoint(_cmsGen->level(),
2059     ref_processor(), clear_all_soft_refs);
2060   #ifdef ASSERT
2061     CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();
2062     size_t free_size = cms_space->free();
2063     assert(free_size ==
2064            pointer_delta(cms_space->end(), cms_space->compaction_top())
2065            * HeapWordSize,
2066       "All the free space should be compacted into one chunk at top");
2067     assert(cms_space->dictionary()->total_chunk_size(
2068                                       debug_only(cms_space->freelistLock())) == 0 ||
2069            cms_space->totalSizeInIndexedFreeLists() == 0,
2070       "All the free space should be in a single chunk");
2071     size_t num = cms_space->totalCount();
2072     assert((free_size == 0 && num == 0) ||
2073            (free_size > 0  && (num == 1 || num == 2)),
2074          "There should be at most 2 free chunks after compaction");
2075   #endif // ASSERT
2076   _collectorState = Resetting;
2077   assert(_restart_addr == NULL,
2078          "Should have been NULL'd before baton was passed");
2079   reset(false /* == !asynch */);
2080   _cmsGen->reset_after_compaction();
2081   _concurrent_cycles_since_last_unload = 0;
2082 
2083   // Clear any data recorded in the PLAB chunk arrays.
2084   if (_survivor_plab_array != NULL) {
2085     reset_survivor_plab_arrays();
2086   }
2087 
2088   // Adjust the per-size allocation stats for the next epoch.
2089   _cmsGen->cmsSpace()->endSweepFLCensus(sweep_count() /* fake */);
2090   // Restart the "inter sweep timer" for the next epoch.
2091   _inter_sweep_timer.reset();
2092   _inter_sweep_timer.start();
2093 
2094   // Sample collection pause time and reset for collection interval.
2095   if (UseAdaptiveSizePolicy) {
2096     size_policy()->msc_collection_end(gch->gc_cause());
2097   }
2098 
2099   gc_timer->register_gc_end(os::elapsed_counter());
2100 
2101   gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
2102 
2103   // For a mark-sweep-compact, compute_new_size() will be called
2104   // in the heap's do_collection() method.
2105 }
2106 
2107 // A work method used by the foreground collector to do
2108 // a mark-sweep, after taking over from a possibly on-going
2109 // concurrent mark-sweep collection.
2110 void CMSCollector::do_mark_sweep_work(bool clear_all_soft_refs,
2111   CollectorState first_state, bool should_start_over) {
2112   if (PrintGC && Verbose) {
2113     gclog_or_tty->print_cr("Pass concurrent collection to foreground "
2114       "collector with count %d",
2115       _full_gcs_since_conc_gc);
2116   }
2117   switch (_collectorState) {
2118     case Idling:
2119       if (first_state == Idling || should_start_over) {
        // The background GC was not active, or should be
        // restarted from scratch; start the cycle.
2122         _collectorState = InitialMarking;
2123       }
2124       // If first_state was not Idling, then a background GC
2125       // was in progress and has now finished.  No need to do it
2126       // again.  Leave the state as Idling.
2127       break;
    case Precleaning:
      // In the foreground case, skip the precleaning: here it would not
      // run concurrently, and it would require extra work.
      _collectorState = FinalMarking;
      break;
2133   }
2134   collect_in_foreground(clear_all_soft_refs, GenCollectedHeap::heap()->gc_cause());
2135 
2136   // For a mark-sweep, compute_new_size() will be called
2137   // in the heap's do_collection() method.
2138 }
2139 
2140 
2141 void CMSCollector::print_eden_and_survivor_chunk_arrays() {
2142   DefNewGeneration* dng = _young_gen->as_DefNewGeneration();
2143   EdenSpace* eden_space = dng->eden();
2144   ContiguousSpace* from_space = dng->from();
2145   ContiguousSpace* to_space   = dng->to();
2146   // Eden
2147   if (_eden_chunk_array != NULL) {
2148     gclog_or_tty->print_cr("eden " PTR_FORMAT "-" PTR_FORMAT "-" PTR_FORMAT "(" SIZE_FORMAT ")",
2149                            eden_space->bottom(), eden_space->top(),
2150                            eden_space->end(), eden_space->capacity());
2151     gclog_or_tty->print_cr("_eden_chunk_index=" SIZE_FORMAT ", "
2152                            "_eden_chunk_capacity=" SIZE_FORMAT,
2153                            _eden_chunk_index, _eden_chunk_capacity);
2154     for (size_t i = 0; i < _eden_chunk_index; i++) {
2155       gclog_or_tty->print_cr("_eden_chunk_array[" SIZE_FORMAT "]=" PTR_FORMAT,
2156                              i, _eden_chunk_array[i]);
2157     }
2158   }
2159   // Survivor
2160   if (_survivor_chunk_array != NULL) {
2161     gclog_or_tty->print_cr("survivor " PTR_FORMAT "-" PTR_FORMAT "-" PTR_FORMAT "(" SIZE_FORMAT ")",
2162                            from_space->bottom(), from_space->top(),
2163                            from_space->end(), from_space->capacity());
2164     gclog_or_tty->print_cr("_survivor_chunk_index=" SIZE_FORMAT ", "
2165                            "_survivor_chunk_capacity=" SIZE_FORMAT,
2166                            _survivor_chunk_index, _survivor_chunk_capacity);
2167     for (size_t i = 0; i < _survivor_chunk_index; i++) {
2168       gclog_or_tty->print_cr("_survivor_chunk_array[" SIZE_FORMAT "]=" PTR_FORMAT,
2169                              i, _survivor_chunk_array[i]);
2170     }
2171   }
2172 }
2173 
2174 void CMSCollector::getFreelistLocks() const {
2175   // Get locks for all free lists in all generations that this
2176   // collector is responsible for
2177   _cmsGen->freelistLock()->lock_without_safepoint_check();
2178 }
2179 
2180 void CMSCollector::releaseFreelistLocks() const {
2181   // Release locks for all free lists in all generations that this
2182   // collector is responsible for
2183   _cmsGen->freelistLock()->unlock();
2184 }
2185 
2186 bool CMSCollector::haveFreelistLocks() const {
2187   // Check locks for all free lists in all generations that this
2188   // collector is responsible for
2189   assert_lock_strong(_cmsGen->freelistLock());
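  // assert_lock_strong() compiles away in product builds, so this method
  // should have no product-mode callers; fail fast if one ever appears.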
2190   PRODUCT_ONLY(ShouldNotReachHere());
2191   return true;
2192 }
2193 
2194 // A utility class that is used by the CMS collector to
2195 // temporarily "release" the foreground collector from its
2196 // usual obligation to wait for the background collector to
2197 // complete an ongoing phase before proceeding.
2198 class ReleaseForegroundGC: public StackObj {
2199  private:
2200   CMSCollector* _c;
2201  public:
2202   ReleaseForegroundGC(CMSCollector* c) : _c(c) {
2203     assert(_c->_foregroundGCShouldWait, "Else should not need to call");
2204     MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2205     // allow a potentially blocked foreground collector to proceed
2206     _c->_foregroundGCShouldWait = false;
2207     if (_c->_foregroundGCIsActive) {
2208       CGC_lock->notify();
2209     }
2210     assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
2211            "Possible deadlock");
2212   }
2213 
2214   ~ReleaseForegroundGC() {
2215     assert(!_c->_foregroundGCShouldWait, "Usage protocol violation?");
2216     MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2217     _c->_foregroundGCShouldWait = true;
2218   }
2219 };
2220 
// There are separate collect_in_background and collect_in_foreground methods
// because of the different locking requirements of the background and
// foreground collectors.  There was originally an attempt to share one
// "collect" method between them, but the amount of if-then-else logic
// required made it cleaner to keep the two methods separate.
2227 void CMSCollector::collect_in_background(bool clear_all_soft_refs, GCCause::Cause cause) {
2228   assert(Thread::current()->is_ConcurrentGC_thread(),
2229     "A CMS asynchronous collection is only allowed on a CMS thread.");
2230 
2231   GenCollectedHeap* gch = GenCollectedHeap::heap();
2232   {
2233     bool safepoint_check = Mutex::_no_safepoint_check_flag;
2234     MutexLockerEx hl(Heap_lock, safepoint_check);
2235     FreelistLocker fll(this);
2236     MutexLockerEx x(CGC_lock, safepoint_check);
2237     if (_foregroundGCIsActive || !UseAsyncConcMarkSweepGC) {
2238       // The foreground collector is active or we're
2239       // not using asynchronous collections.  Skip this
2240       // background collection.
2241       assert(!_foregroundGCShouldWait, "Should be clear");
2242       return;
2243     } else {
2244       assert(_collectorState == Idling, "Should be idling before start.");
2245       _collectorState = InitialMarking;
2246       register_gc_start(cause);
2247       // Reset the expansion cause, now that we are about to begin
2248       // a new cycle.
2249       clear_expansion_cause();
2250 
2251       // Clear the MetaspaceGC flag since a concurrent collection
2252       // is starting but also clear it after the collection.
2253       MetaspaceGC::set_should_concurrent_collect(false);
2254     }
2255     // Decide if we want to enable class unloading as part of the
2256     // ensuing concurrent GC cycle.
2257     update_should_unload_classes();
2258     _full_gc_requested = false;           // acks all outstanding full gc requests
2259     _full_gc_cause = GCCause::_no_gc;
2260     // Signal that we are about to start a collection
2261     gch->increment_total_full_collections();  // ... starting a collection cycle
2262     _collection_count_start = gch->total_full_collections();
2263   }
2264 
2265   // Used for PrintGC
2266   size_t prev_used;
2267   if (PrintGC && Verbose) {
2268     prev_used = _cmsGen->used(); // XXXPERM
2269   }
2270 
2271   // The change of the collection state is normally done at this level;
2272   // the exceptions are phases that are executed while the world is
2273   // stopped.  For those phases the change of state is done while the
2274   // world is stopped.  For baton passing purposes this allows the
2275   // background collector to finish the phase and change state atomically.
2276   // The foreground collector cannot wait on a phase that is done
2277   // while the world is stopped because the foreground collector already
2278   // has the world stopped and would deadlock.
2279   while (_collectorState != Idling) {
2280     if (TraceCMSState) {
2281       gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " in CMS state %d",
2282         Thread::current(), _collectorState);
2283     }
2284     // The foreground collector
2285     //   holds the Heap_lock throughout its collection.
2286     //   holds the CMS token (but not the lock)
2287     //     except while it is waiting for the background collector to yield.
2288     //
2289     // The foreground collector should be blocked (not for long)
2290     //   if the background collector is about to start a phase
2291     //   executed with world stopped.  If the background
2292     //   collector has already started such a phase, the
2293     //   foreground collector is blocked waiting for the
2294     //   Heap_lock.  The stop-world phases (InitialMarking and FinalMarking)
2295     //   are executed in the VM thread.
2296     //
2297     // The locking order is
2298     //   PendingListLock (PLL)  -- if applicable (FinalMarking)
2299     //   Heap_lock  (both this & PLL locked in VM_CMS_Operation::prologue())
2300     //   CMS token  (claimed in
2301     //                stop_world_and_do() -->
2302     //                  safepoint_synchronize() -->
2303     //                    CMSThread::synchronize())
2304 
2305     {
2306       // Check if the FG collector wants us to yield.
2307       CMSTokenSync x(true); // is cms thread
2308       if (waitForForegroundGC()) {
2309         // We yielded to a foreground GC, nothing more to be
2310         // done this round.
2311         assert(_foregroundGCShouldWait == false, "We set it to false in "
2312                "waitForForegroundGC()");
2313         if (TraceCMSState) {
2314           gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
2315             " exiting collection CMS state %d",
2316             Thread::current(), _collectorState);
2317         }
2318         return;
2319       } else {
2320         // The background collector can run but check to see if the
2321         // foreground collector has done a collection while the
2322         // background collector was waiting to get the CGC_lock
2323         // above.  If yes, break so that _foregroundGCShouldWait
2324         // is cleared before returning.
2325         if (_collectorState == Idling) {
2326           break;
2327         }
2328       }
2329     }
2330 
2331     assert(_foregroundGCShouldWait, "Foreground collector, if active, "
2332       "should be waiting");
2333 
2334     switch (_collectorState) {
2335       case InitialMarking:
2336         {
2337           ReleaseForegroundGC x(this);
2338           stats().record_cms_begin();
2339           VM_CMS_Initial_Mark initial_mark_op(this);
2340           VMThread::execute(&initial_mark_op);
2341         }
2342         // The collector state may be any legal state at this point
2343         // since the background collector may have yielded to the
2344         // foreground collector.
2345         break;
2346       case Marking:
2347         // initial marking in checkpointRootsInitialWork has been completed
2348         if (markFromRoots(true)) { // we were successful
2349           assert(_collectorState == Precleaning, "Collector state should "
2350             "have changed");
2351         } else {
2352           assert(_foregroundGCIsActive, "Internal state inconsistency");
2353         }
2354         break;
2355       case Precleaning:
2356         if (UseAdaptiveSizePolicy) {
2357           size_policy()->concurrent_precleaning_begin();
2358         }
2359         // marking from roots in markFromRoots has been completed
2360         preclean();
2361         if (UseAdaptiveSizePolicy) {
2362           size_policy()->concurrent_precleaning_end();
2363         }
2364         assert(_collectorState == AbortablePreclean ||
2365                _collectorState == FinalMarking,
2366                "Collector state should have changed");
2367         break;
2368       case AbortablePreclean:
2369         if (UseAdaptiveSizePolicy) {
          size_policy()->concurrent_phases_resume();
2371         }
2372         abortable_preclean();
2373         if (UseAdaptiveSizePolicy) {
2374           size_policy()->concurrent_precleaning_end();
2375         }
2376         assert(_collectorState == FinalMarking, "Collector state should "
2377           "have changed");
2378         break;
2379       case FinalMarking:
2380         {
2381           ReleaseForegroundGC x(this);
2382 
2383           VM_CMS_Final_Remark final_remark_op(this);
2384           VMThread::execute(&final_remark_op);
2385         }
2386         assert(_foregroundGCShouldWait, "block post-condition");
2387         break;
2388       case Sweeping:
2389         if (UseAdaptiveSizePolicy) {
2390           size_policy()->concurrent_sweeping_begin();
2391         }
2392         // final marking in checkpointRootsFinal has been completed
2393         sweep(true);
2394         assert(_collectorState == Resizing, "Collector state change "
2395           "to Resizing must be done under the free_list_lock");
2396         _full_gcs_since_conc_gc = 0;
2397 
2398         // Stop the timers for adaptive size policy for the concurrent phases
2399         if (UseAdaptiveSizePolicy) {
2400           size_policy()->concurrent_sweeping_end();
          size_policy()->concurrent_phases_end(gch->gc_cause(),
                                               gch->prev_gen(_cmsGen)->capacity(),
                                               _cmsGen->free());
2404         }
2405 
2406       case Resizing: {
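        // Note: reached both by direct dispatch on _collectorState and by
        // intentional fall-through from the Sweeping case above.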
2407         // Sweeping has been completed...
2408         // At this point the background collection has completed.
2409         // Don't move the call to compute_new_size() down
2410         // into code that might be executed if the background
2411         // collection was preempted.
2412         {
2413           ReleaseForegroundGC x(this);   // unblock FG collection
2414           MutexLockerEx       y(Heap_lock, Mutex::_no_safepoint_check_flag);
2415           CMSTokenSync        z(true);   // not strictly needed.
2416           if (_collectorState == Resizing) {
2417             compute_new_size();
2418             save_heap_summary();
2419             _collectorState = Resetting;
2420           } else {
2421             assert(_collectorState == Idling, "The state should only change"
2422                    " because the foreground collector has finished the collection");
2423           }
2424         }
2425         break;
2426       }
2427       case Resetting:
2428         // CMS heap resizing has been completed
2429         reset(true);
2430         assert(_collectorState == Idling, "Collector state should "
2431           "have changed");
2432 
2433         MetaspaceGC::set_should_concurrent_collect(false);
2434 
2435         stats().record_cms_end();
2436         // Don't move the concurrent_phases_end() and compute_new_size()
2437         // calls to here because a preempted background collection
        // has its state set to "Resetting".
2439         break;
2440       case Idling:
2441       default:
2442         ShouldNotReachHere();
2443         break;
2444     }
2445     if (TraceCMSState) {
2446       gclog_or_tty->print_cr("  Thread " INTPTR_FORMAT " done - next CMS state %d",
2447         Thread::current(), _collectorState);
2448     }
2449     assert(_foregroundGCShouldWait, "block post-condition");
2450   }
2451 
2452   // Should this be in gc_epilogue?
2453   collector_policy()->counters()->update_counters();
2454 
2455   {
2456     // Clear _foregroundGCShouldWait and, in the event that the
2457     // foreground collector is waiting, notify it, before
2458     // returning.
2459     MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2460     _foregroundGCShouldWait = false;
2461     if (_foregroundGCIsActive) {
2462       CGC_lock->notify();
2463     }
2464     assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
2465            "Possible deadlock");
2466   }
2467   if (TraceCMSState) {
2468     gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
2469       " exiting collection CMS state %d",
2470       Thread::current(), _collectorState);
2471   }
2472   if (PrintGC && Verbose) {
2473     _cmsGen->print_heap_change(prev_used);
2474   }
2475 }
2476 
2477 void CMSCollector::register_foreground_gc_start(GCCause::Cause cause) {
2478   if (!_cms_start_registered) {
2479     register_gc_start(cause);
2480   }
2481 }
2482 
2483 void CMSCollector::register_gc_start(GCCause::Cause cause) {
2484   _cms_start_registered = true;
2485   _gc_timer_cm->register_gc_start(os::elapsed_counter());
2486   _gc_tracer_cm->report_gc_start(cause, _gc_timer_cm->gc_start());
2487 }
2488 
2489 void CMSCollector::register_gc_end() {
2490   if (_cms_start_registered) {
2491     report_heap_summary(GCWhen::AfterGC);
2492 
2493     _gc_timer_cm->register_gc_end(os::elapsed_counter());
2494     _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
2495     _cms_start_registered = false;
2496   }
2497 }
2498 
2499 void CMSCollector::save_heap_summary() {
2500   GenCollectedHeap* gch = GenCollectedHeap::heap();
2501   _last_heap_summary = gch->create_heap_summary();
2502   _last_metaspace_summary = gch->create_metaspace_summary();
2503 }
2504 
2505 void CMSCollector::report_heap_summary(GCWhen::Type when) {
2506   _gc_tracer_cm->report_gc_heap_summary(when, _last_heap_summary, _last_metaspace_summary);
2507 }
2508 
2509 void CMSCollector::collect_in_foreground(bool clear_all_soft_refs, GCCause::Cause cause) {
2510   assert(_foregroundGCIsActive && !_foregroundGCShouldWait,
2511          "Foreground collector should be executing, not waiting");
2512   assert(Thread::current()->is_VM_thread(), "A foreground collection "
2513     "may only be done by the VM Thread with the world stopped");
2514   assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
2515          "VM thread should have CMS token");
2516 
2517   NOT_PRODUCT(GCTraceTime t("CMS:MS (foreground) ", PrintGCDetails && Verbose,
2518     true, NULL);)
2519   if (UseAdaptiveSizePolicy) {
2520     size_policy()->ms_collection_begin();
2521   }
2522   COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact);
2523 
2524   HandleMark hm;  // Discard invalid handles created during verification
2525 
2526   if (VerifyBeforeGC &&
2527       GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2528     Universe::verify();
2529   }
2530 
2531   // Snapshot the soft reference policy to be used in this collection cycle.
2532   ref_processor()->setup_policy(clear_all_soft_refs);
2533 
2534   bool init_mark_was_synchronous = false; // until proven otherwise
2535   while (_collectorState != Idling) {
2536     if (TraceCMSState) {
2537       gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " in CMS state %d",
2538         Thread::current(), _collectorState);
2539     }
2540     switch (_collectorState) {
2541       case InitialMarking:
2542         register_foreground_gc_start(cause);
2543         init_mark_was_synchronous = true;  // fact to be exploited in re-mark
2544         checkpointRootsInitial(false);
2545         assert(_collectorState == Marking, "Collector state should have changed"
2546           " within checkpointRootsInitial()");
2547         break;
2548       case Marking:
2549         // initial marking in checkpointRootsInitialWork has been completed
2550         if (VerifyDuringGC &&
2551             GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2552           Universe::verify("Verify before initial mark: ");
2553         }
2554         {
2555           bool res = markFromRoots(false);
2556           assert(res && _collectorState == FinalMarking, "Collector state should "
2557             "have changed");
2558           break;
2559         }
2560       case FinalMarking:
2561         if (VerifyDuringGC &&
2562             GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2563           Universe::verify("Verify before re-mark: ");
2564         }
2565         checkpointRootsFinal(false, clear_all_soft_refs,
2566                              init_mark_was_synchronous);
2567         assert(_collectorState == Sweeping, "Collector state should "
2568           "have changed within checkpointRootsFinal()");
2569         break;
2570       case Sweeping:
2571         // final marking in checkpointRootsFinal has been completed
2572         if (VerifyDuringGC &&
2573             GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2574           Universe::verify("Verify before sweep: ");
2575         }
2576         sweep(false);
2577         assert(_collectorState == Resizing, "Incorrect state");
2578         break;
2579       case Resizing: {
2580         // Sweeping has been completed; the actual resize in this case
2581         // is done separately; nothing to be done in this state.
2582         _collectorState = Resetting;
2583         break;
2584       }
2585       case Resetting:
2586         // The heap has been resized.
2587         if (VerifyDuringGC &&
2588             GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2589           Universe::verify("Verify before reset: ");
2590         }
2591         save_heap_summary();
2592         reset(false);
2593         assert(_collectorState == Idling, "Collector state should "
2594           "have changed");
2595         break;
2596       case Precleaning:
2597       case AbortablePreclean:
2598         // Elide the preclean phase
2599         _collectorState = FinalMarking;
2600         break;
2601       default:
2602         ShouldNotReachHere();
2603     }
2604     if (TraceCMSState) {
2605       gclog_or_tty->print_cr("  Thread " INTPTR_FORMAT " done - next CMS state %d",
2606         Thread::current(), _collectorState);
2607     }
2608   }
2609 
2610   if (UseAdaptiveSizePolicy) {
2611     GenCollectedHeap* gch = GenCollectedHeap::heap();
2612     size_policy()->ms_collection_end(gch->gc_cause());
2613   }
2614 
2615   if (VerifyAfterGC &&
2616       GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2617     Universe::verify();
2618   }
2619   if (TraceCMSState) {
2620     gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
2621       " exiting collection CMS state %d",
2622       Thread::current(), _collectorState);
2623   }
2624 }
2625 
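// Called by the CMS (background) collector thread while it holds the CMS
// token, to check whether a foreground collection wants to take over. If
// so, the background thread surrenders the token (recording that it still
// wants it back), wakes the blocked foreground thread, and sleeps until
// the foreground collection completes before reclaiming the token.
// Returns true iff it yielded to a foreground collection.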
2626 bool CMSCollector::waitForForegroundGC() {
2627   bool res = false;
2628   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
2629          "CMS thread should have CMS token");
2630   // Block the foreground collector until the
2631   // background collector decides whether to
2632   // yield.
2633   MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2634   _foregroundGCShouldWait = true;
2635   if (_foregroundGCIsActive) {
2636     // The background collector yields to the
2637     // foreground collector and returns a value
2638     // indicating that it has yielded.  The foreground
2639     // collector can proceed.
2640     res = true;
2641     _foregroundGCShouldWait = false;
2642     ConcurrentMarkSweepThread::clear_CMS_flag(
2643       ConcurrentMarkSweepThread::CMS_cms_has_token);
2644     ConcurrentMarkSweepThread::set_CMS_flag(
2645       ConcurrentMarkSweepThread::CMS_cms_wants_token);
2646     // Get a possibly blocked foreground thread going
2647     CGC_lock->notify();
2648     if (TraceCMSState) {
2649       gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT " waiting at CMS state %d",
2650         Thread::current(), _collectorState);
2651     }
2652     while (_foregroundGCIsActive) {
2653       CGC_lock->wait(Mutex::_no_safepoint_check_flag);
2654     }
2655     ConcurrentMarkSweepThread::set_CMS_flag(
2656       ConcurrentMarkSweepThread::CMS_cms_has_token);
2657     ConcurrentMarkSweepThread::clear_CMS_flag(
2658       ConcurrentMarkSweepThread::CMS_cms_wants_token);
2659   }
2660   if (TraceCMSState) {
2661     gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT " continuing at CMS state %d",
2662       Thread::current(), _collectorState);
2663   }
2664   return res;
2665 }
2666 
2667 // Because of the need to lock the free lists and other structures in
2668 // the collector, common to all the generations that the collector is
2669 // collecting, we need the gc_prologues of individual CMS generations
2670 // to delegate to their collector. It may have been simpler had the
2671 // current infrastructure allowed one to call a prologue on a
2672 // collector. In the absence of that we have the generation's
2673 // prologue delegate to the collector, which delegates back
2674 // some "local" work to a worker method in the individual generations
2675 // that it's responsible for collecting, while itself doing any
2676 // work common to all generations it's responsible for. A similar
2677 // comment applies to the gc_epilogue()s.
2678 // The role of the variable _between_prologue_and_epilogue is to
2679 // enforce the invocation protocol.
2680 void CMSCollector::gc_prologue(bool full) {
2681   // Call gc_prologue_work() for the CMSGen
2682   // we are responsible for.
2683 
2684   // The following locking discipline assumes that we are only called
2685   // when the world is stopped.
2686   assert(SafepointSynchronize::is_at_safepoint(), "world is stopped assumption");
2687 
2688   // The CMSCollector prologue must call the gc_prologues for the
2689   // "generations" that it's responsible
2690   // for.
2691 
2692   assert(   Thread::current()->is_VM_thread()
2693          || (   CMSScavengeBeforeRemark
2694              && Thread::current()->is_ConcurrentGC_thread()),
2695          "Incorrect thread type for prologue execution");
2696 
2697   if (_between_prologue_and_epilogue) {
2698     // We have already been invoked; this is a gc_prologue delegation
2699     // from yet another CMS generation that we are responsible for, just
2700     // ignore it since all relevant work has already been done.
2701     return;
2702   }
2703 
2704   // set a bit saying prologue has been called; cleared in epilogue
2705   _between_prologue_and_epilogue = true;
2706   // Claim locks for common data structures, then call gc_prologue_work()
2707   // for each CMSGen.
2708 
2709   getFreelistLocks();   // gets free list locks on constituent spaces
2710   bitMapLock()->lock_without_safepoint_check();
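  // Note the lock ordering: the free list locks are claimed before the
  // bit map lock; gc_epilogue() releases them in the opposite order.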
2711 
2712   // Should call gc_prologue_work() for all cms gens we are responsible for
2713   bool duringMarking =    _collectorState >= Marking
2714                          && _collectorState < Sweeping;
2715 
2716   // The young collections clear the modified oops state, which tells if
2717   // there are any modified oops in the class. The remark phase also needs
2718   // that information. Tell the young collection to save the union of all
2719   // modified klasses.
2720   if (duringMarking) {
2721     _ct->klass_rem_set()->set_accumulate_modified_oops(true);
2722   }
2723 
2724   bool registerClosure = duringMarking;
2725 
2726   ModUnionClosure* muc = CollectedHeap::use_parallel_gc_threads() ?
2727                                                &_modUnionClosurePar
2728                                                : &_modUnionClosure;
2729   _cmsGen->gc_prologue_work(full, registerClosure, muc);
2730 
2731   if (!full) {
2732     stats().record_gc0_begin();
2733   }
2734 }
2735 
2736 void ConcurrentMarkSweepGeneration::gc_prologue(bool full) {
2737 
2738   _capacity_at_prologue = capacity();
2739   _used_at_prologue = used();
2740 
2741   // Delegate to CMScollector which knows how to coordinate between
2742   // this and any other CMS generations that it is responsible for
2743   // collecting.
2744   collector()->gc_prologue(full);
2745 }
2746 
2747 // This is a "private" interface for use by this generation's CMSCollector.
2748 // Not to be called directly by any other entity (for instance,
2749 // GenCollectedHeap, which calls the "public" gc_prologue method above).
2750 void ConcurrentMarkSweepGeneration::gc_prologue_work(bool full,
2751   bool registerClosure, ModUnionClosure* modUnionClosure) {
2752   assert(!incremental_collection_failed(), "Shouldn't be set yet");
2753   assert(cmsSpace()->preconsumptionDirtyCardClosure() == NULL,
2754     "Should be NULL");
2755   if (registerClosure) {
2756     cmsSpace()->setPreconsumptionDirtyCardClosure(modUnionClosure);
2757   }
2758   cmsSpace()->gc_prologue();
2759   // Clear stat counters
2760   NOT_PRODUCT(
2761     assert(_numObjectsPromoted == 0, "check");
2762     assert(_numWordsPromoted   == 0, "check");
2763     if (Verbose && PrintGC) {
2764       gclog_or_tty->print("Allocated "SIZE_FORMAT" objects, "
2765                           SIZE_FORMAT" bytes concurrently",
2766       _numObjectsAllocated, _numWordsAllocated*sizeof(HeapWord));
2767     }
2768     _numObjectsAllocated = 0;
2769     _numWordsAllocated   = 0;
2770   )
2771 }
2772 
2773 void CMSCollector::gc_epilogue(bool full) {
2774   // The following locking discipline assumes that we are only called
2775   // when the world is stopped.
2776   assert(SafepointSynchronize::is_at_safepoint(),
2777          "world is stopped assumption");
2778 
2779   // Currently the CMS epilogue (see CompactibleFreeListSpace) merely checks
2780   // if linear allocation blocks need to be appropriately marked to allow the
2781   // if linear allocation blocks need to be appropriately marked to allow
2782   // the blocks to be parsable. We also check here whether we need to nudge the
2783   assert(   Thread::current()->is_VM_thread()
2784          || (   CMSScavengeBeforeRemark
2785              && Thread::current()->is_ConcurrentGC_thread()),
2786          "Incorrect thread type for epilogue execution");
2787 
2788   if (!_between_prologue_and_epilogue) {
2789     // We have already been invoked; this is a gc_epilogue delegation
2790     // from yet another CMS generation that we are responsible for, just
2791     // ignore it since all relevant work has already been done.
2792     return;
2793   }
2794   assert(haveFreelistLocks(), "must have freelist locks");
2795   assert_lock_strong(bitMapLock());
2796 
2797   _ct->klass_rem_set()->set_accumulate_modified_oops(false);
2798 
2799   _cmsGen->gc_epilogue_work(full);
2800 
2801   if (_collectorState == AbortablePreclean || _collectorState == Precleaning) {
2802     // in case sampling was not already enabled, enable it
2803     _start_sampling = true;
2804   }
2805   // reset _eden_chunk_array so sampling starts afresh
2806   _eden_chunk_index = 0;
2807 
2808   size_t cms_used   = _cmsGen->cmsSpace()->used();
2809 
2810   // update performance counters - this uses a special version of
2811   // update_counters() that allows the utilization to be passed as a
2812   // parameter, avoiding multiple calls to used().
2813   //
2814   _cmsGen->update_counters(cms_used);
2815 
2816   if (CMSIncrementalMode) {
2817     icms_update_allocation_limits();
2818   }
2819 
2820   bitMapLock()->unlock();
2821   releaseFreelistLocks();
2822 
2823   if (!CleanChunkPoolAsync) {
2824     Chunk::clean_chunk_pool();
2825   }
2826 
2827   set_did_compact(false);
2828   _between_prologue_and_epilogue = false;  // ready for next cycle
2829 }
2830 
2831 void ConcurrentMarkSweepGeneration::gc_epilogue(bool full) {
2832   collector()->gc_epilogue(full);
2833 
2834   // Also reset promotion tracking in par gc thread states.
2835   if (CollectedHeap::use_parallel_gc_threads()) {
2836     for (uint i = 0; i < ParallelGCThreads; i++) {
2837       _par_gc_thread_states[i]->promo.stopTrackingPromotions(i);
2838     }
2839   }
2840 }
2841 
2842 void ConcurrentMarkSweepGeneration::gc_epilogue_work(bool full) {
2843   assert(!incremental_collection_failed(), "Should have been cleared");
2844   cmsSpace()->setPreconsumptionDirtyCardClosure(NULL);
2845   cmsSpace()->gc_epilogue();
2846   // Print stat counters
2847   NOT_PRODUCT(
2848     assert(_numObjectsAllocated == 0, "check");
2849     assert(_numWordsAllocated == 0, "check");
2850     if (Verbose && PrintGC) {
2851       gclog_or_tty->print("Promoted "SIZE_FORMAT" objects, "
2852                           SIZE_FORMAT" bytes",
2853                  _numObjectsPromoted, _numWordsPromoted*sizeof(HeapWord));
2854     }
2855     _numObjectsPromoted = 0;
2856     _numWordsPromoted   = 0;
2857   )
2858 
2859   if (PrintGC && Verbose) {
2860     // The call down the chain through contiguous_available() needs the
2861     // freelistLock, so print this out before releasing that lock.
2862     gclog_or_tty->print(" Contiguous available "SIZE_FORMAT" bytes ",
2863                         contiguous_available());
2864   }
2865 }
2866 
2867 #ifndef PRODUCT
2868 bool CMSCollector::have_cms_token() {
2869   Thread* thr = Thread::current();
2870   if (thr->is_VM_thread()) {
2871     return ConcurrentMarkSweepThread::vm_thread_has_cms_token();
2872   } else if (thr->is_ConcurrentGC_thread()) {
2873     return ConcurrentMarkSweepThread::cms_thread_has_cms_token();
2874   } else if (thr->is_GC_task_thread()) {
2875     return ConcurrentMarkSweepThread::vm_thread_has_cms_token() &&
2876            ParGCRareEvent_lock->owned_by_self();
2877   }
2878   return false;
2879 }
2880 #endif
2881 
2882 // Check reachability of the given heap address in CMS generation,
2883 // treating all other generations as roots.
2884 bool CMSCollector::is_cms_reachable(HeapWord* addr) {
2885   // We could "guarantee" below, rather than assert, but I'll
2886   // leave these as "asserts" so that an adventurous debugger
2887   // could try this in the product build provided some subset of
2888   // the conditions were met, provided they were interested in the
2889   // results and knew that the computation below wouldn't interfere
2890   // with other concurrent computations mutating the structures
2891   // being read or written.
2892   assert(SafepointSynchronize::is_at_safepoint(),
2893          "Else mutations in object graph will make answer suspect");
2894   assert(have_cms_token(), "Should hold cms token");
2895   assert(haveFreelistLocks(), "must hold free list locks");
2896   assert_lock_strong(bitMapLock());
2897 
2898   // First, just for kicks, report whether the given address is
2899   // already marked in the CMS mark bit map.
2900   gclog_or_tty->print_cr("Start: Address " PTR_FORMAT " is%s marked", addr,
2901                 _markBitMap.isMarked(addr) ? "" : " not");
2902 
2903   if (verify_after_remark()) {
2904     MutexLockerEx x(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
2905     bool result = verification_mark_bm()->isMarked(addr);
2906     gclog_or_tty->print_cr("TransitiveMark: Address " PTR_FORMAT " %s marked", addr,
2907                            result ? "IS" : "is NOT");
2908     return result;
2909   } else {
2910     gclog_or_tty->print_cr("Could not compute result");
2911     return false;
2912   }
2913 }
2914 
2915 
2916 void
2917 CMSCollector::print_on_error(outputStream* st) {
2918   CMSCollector* collector = ConcurrentMarkSweepGeneration::_collector;
2919   if (collector != NULL) {
2920     CMSBitMap* bitmap = &collector->_markBitMap;
2921     st->print_cr("Marking Bits: (CMSBitMap*) " PTR_FORMAT, bitmap);
2922     bitmap->print_on_error(st, " Bits: ");
2923 
2924     st->cr();
2925 
2926     CMSBitMap* mut_bitmap = &collector->_modUnionTable;
2927     st->print_cr("Mod Union Table: (CMSBitMap*) " PTR_FORMAT, mut_bitmap);
2928     mut_bitmap->print_on_error(st, " Bits: ");
2929   }
2930 }
2931 
2932 ////////////////////////////////////////////////////////
2933 // CMS Verification Support
2934 ////////////////////////////////////////////////////////
2935 // Following the remark phase, the following invariant
2936 // should hold -- each object in the CMS heap which is
2937 // marked in verification_mark_bm() should be marked in markBitMap().
2938 
2939 class VerifyMarkedClosure: public BitMapClosure {
2940   CMSBitMap* _marks;
2941   bool       _failed;
2942 
2943  public:
2944   VerifyMarkedClosure(CMSBitMap* bm): _marks(bm), _failed(false) {}
2945 
2946   bool do_bit(size_t offset) {
2947     HeapWord* addr = _marks->offsetToHeapWord(offset);
2948     if (!_marks->isMarked(addr)) {
2949       oop(addr)->print_on(gclog_or_tty);
2950       gclog_or_tty->print_cr(" ("INTPTR_FORMAT" should have been marked)", addr);
2951       _failed = true;
2952     }
2953     return true;
2954   }
2955 
2956   bool failed() { return _failed; }
2957 };
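// VerifyMarkedClosure is iterated over the bits set in the verification
// bit map (see verify_after_remark_work_1/2 below); any bit set there but
// clear in the CMS mark bit map identifies an object that concurrent
// marking missed, which is printed and flagged as a failure.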
2958 
2959 bool CMSCollector::verify_after_remark(bool silent) {
2960   if (!silent) gclog_or_tty->print(" [Verifying CMS Marking... ");
2961   MutexLockerEx ml(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
2962   static bool init = false;
2963 
2964   assert(SafepointSynchronize::is_at_safepoint(),
2965          "Else mutations in object graph will make answer suspect");
2966   assert(have_cms_token(),
2967          "Else there may be mutual interference in use of "
2968          "verification data structures");
2969   assert(_collectorState > Marking && _collectorState <= Sweeping,
2970          "Else marking info checked here may be obsolete");
2971   assert(haveFreelistLocks(), "must hold free list locks");
2972   assert_lock_strong(bitMapLock());
2973 
2974 
2975   // Allocate marking bit map if not already allocated
2976   if (!init) { // first time
2977     if (!verification_mark_bm()->allocate(_span)) {
2978       return false;
2979     }
2980     init = true;
2981   }
2982 
2983   assert(verification_mark_stack()->isEmpty(), "Should be empty");
2984 
2985   // Turn off refs discovery -- so we will be tracing through refs.
2986   // This is as intended, because by this time
2987   // GC must already have cleared any refs that need to be cleared,
2988   // and traced those that need to be marked; moreover,
2989   // the marking done here is not going to interfere in any
2990   // way with the marking information used by GC.
2991   NoRefDiscovery no_discovery(ref_processor());
2992 
2993   COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
2994 
2995   // Clear any marks from a previous round
2996   verification_mark_bm()->clear_all();
2997   assert(verification_mark_stack()->isEmpty(), "markStack should be empty");
2998   verify_work_stacks_empty();
2999 
3000   GenCollectedHeap* gch = GenCollectedHeap::heap();
3001   gch->ensure_parsability(false);  // fill TLABs, but no need to retire them
3002   // Update the saved marks which may affect the root scans.
3003   gch->save_marks();
3004 
3005   if (CMSRemarkVerifyVariant == 1) {
3006     // In this first variant of verification, we complete
3007     // all marking, then check if the new marks-vector is
3008     // a subset of the CMS marks-vector.
3009     verify_after_remark_work_1();
3010   } else if (CMSRemarkVerifyVariant == 2) {
3011     // In this second variant of verification, we flag an error
3012     // (i.e. an object reachable in the new marks-vector not reachable
3013     // in the CMS marks-vector) immediately, also indicating the
3014     // identity of an object (A) that references the unmarked object (B) --
3015     // presumably, a mutation to A failed to be picked up by preclean/remark?
3016     verify_after_remark_work_2();
3017   } else {
3018     warning("Unrecognized value %d for CMSRemarkVerifyVariant",
3019             CMSRemarkVerifyVariant);
3020   }
3021   if (!silent) gclog_or_tty->print(" done] ");
3022   return true;
3023 }
3024 
3025 void CMSCollector::verify_after_remark_work_1() {
3026   ResourceMark rm;
3027   HandleMark  hm;
3028   GenCollectedHeap* gch = GenCollectedHeap::heap();
3029 
3030   // Get a clear set of claim bits for the strong roots processing to work with.
3031   ClassLoaderDataGraph::clear_claimed_marks();
3032 
3033   // Mark from roots one level into CMS
3034   MarkRefsIntoClosure notOlder(_span, verification_mark_bm());
3035   gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
3036 
3037   gch->gen_process_strong_roots(_cmsGen->level(),
3038                                 true,   // younger gens are roots
3039                                 true,   // activate StrongRootsScope
3040                                 false,  // not scavenging
3041                                 SharedHeap::ScanningOption(roots_scanning_options()),
3042                                 &notOlder,
3043                                 true,   // walk code active on stacks
3044                                 NULL,
3045                                 NULL); // SSS: Provide correct closure
3046 
3047   // Now mark from the roots
3048   MarkFromRootsClosure markFromRootsClosure(this, _span,
3049     verification_mark_bm(), verification_mark_stack(),
3050     false /* don't yield */, true /* verifying */);
3051   assert(_restart_addr == NULL, "Expected pre-condition");
3052   verification_mark_bm()->iterate(&markFromRootsClosure);
3053   while (_restart_addr != NULL) {
3054     // Deal with stack overflow: by restarting at the indicated
3055     // address.
3056     HeapWord* ra = _restart_addr;
3057     markFromRootsClosure.reset(ra);
3058     _restart_addr = NULL;
3059     verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
3060   }
3061   assert(verification_mark_stack()->isEmpty(), "Should have been drained");
3062   verify_work_stacks_empty();
3063 
3064   // Marking completed -- now verify that each bit marked in
3065   // verification_mark_bm() is also marked in markBitMap(); flag all
3066   // errors by printing corresponding objects.
3067   VerifyMarkedClosure vcl(markBitMap());
3068   verification_mark_bm()->iterate(&vcl);
3069   if (vcl.failed()) {
3070     gclog_or_tty->print("Verification failed");
3071     Universe::heap()->print_on(gclog_or_tty);
3072     fatal("CMS: failed marking verification after remark");
3073   }
3074 }
3075 
3076 class VerifyKlassOopsKlassClosure : public KlassClosure {
3077   class VerifyKlassOopsClosure : public OopClosure {
3078     CMSBitMap* _bitmap;
3079    public:
3080     VerifyKlassOopsClosure(CMSBitMap* bitmap) : _bitmap(bitmap) { }
3081     void do_oop(oop* p)       { guarantee(*p == NULL || _bitmap->isMarked((HeapWord*) *p), "Should be marked"); }
3082     void do_oop(narrowOop* p) { ShouldNotReachHere(); }
3083   } _oop_closure;
3084  public:
3085   VerifyKlassOopsKlassClosure(CMSBitMap* bitmap) : _oop_closure(bitmap) {}
3086   void do_klass(Klass* k) {
3087     k->oops_do(&_oop_closure);
3088   }
3089 };
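// Used only by the second verification variant: after the trace completes,
// this closure additionally checks that every oop embedded in a reachable
// Klass was marked in the verification bit map (see its use at the end of
// verify_after_remark_work_2() below).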
3090 
3091 void CMSCollector::verify_after_remark_work_2() {
3092   ResourceMark rm;
3093   HandleMark  hm;
3094   GenCollectedHeap* gch = GenCollectedHeap::heap();
3095 
3096   // Get a clear set of claim bits for the strong roots processing to work with.
3097   ClassLoaderDataGraph::clear_claimed_marks();
3098 
3099   // Mark from roots one level into CMS
3100   MarkRefsIntoVerifyClosure notOlder(_span, verification_mark_bm(),
3101                                      markBitMap());
3102   CMKlassClosure klass_closure(&notOlder);
3103 
3104   gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
3105   gch->gen_process_strong_roots(_cmsGen->level(),
3106                                 true,   // younger gens are roots
3107                                 true,   // activate StrongRootsScope
3108                                 false,  // not scavenging
3109                                 SharedHeap::ScanningOption(roots_scanning_options()),
3110                                 &notOlder,
3111                                 true,   // walk code active on stacks
3112                                 NULL,
3113                                 &klass_closure);
3114 
3115   // Now mark from the roots
3116   MarkFromRootsVerifyClosure markFromRootsClosure(this, _span,
3117     verification_mark_bm(), markBitMap(), verification_mark_stack());
3118   assert(_restart_addr == NULL, "Expected pre-condition");
3119   verification_mark_bm()->iterate(&markFromRootsClosure);
3120   while (_restart_addr != NULL) {
3121     // Deal with stack overflow: by restarting at the indicated
3122     // address.
3123     HeapWord* ra = _restart_addr;
3124     markFromRootsClosure.reset(ra);
3125     _restart_addr = NULL;
3126     verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
3127   }
3128   assert(verification_mark_stack()->isEmpty(), "Should have been drained");
3129   verify_work_stacks_empty();
3130 
3131   VerifyKlassOopsKlassClosure verify_klass_oops(verification_mark_bm());
3132   ClassLoaderDataGraph::classes_do(&verify_klass_oops);
3133 
3134   // Marking completed -- now verify that each bit marked in
3135   // verification_mark_bm() is also marked in markBitMap(); flag all
3136   // errors by printing corresponding objects.
3137   VerifyMarkedClosure vcl(markBitMap());
3138   verification_mark_bm()->iterate(&vcl);
3139   assert(!vcl.failed(), "Else verification above should not have succeeded");
3140 }
3141 
3142 void ConcurrentMarkSweepGeneration::save_marks() {
3143   // delegate to CMS space
3144   cmsSpace()->save_marks();
3145   for (uint i = 0; i < ParallelGCThreads; i++) {
3146     _par_gc_thread_states[i]->promo.startTrackingPromotions();
3147   }
3148 }
3149 
3150 bool ConcurrentMarkSweepGeneration::no_allocs_since_save_marks() {
3151   return cmsSpace()->no_allocs_since_save_marks();
3152 }
3153 
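// The macro below stamps out one type-specialized iterator per closure
// type. As a sketch, for a closure pair such as (ScanClosure, _nv) from
// ALL_SINCE_SAVE_MARKS_CLOSURES the expansion would read:
//
//   void ConcurrentMarkSweepGeneration::
//   oop_since_save_marks_iterate_nv(ScanClosure* cl) {
//     cl->set_generation(this);
//     cmsSpace()->oop_since_save_marks_iterate_nv(cl);
//     cl->reset_generation();
//     save_marks();
//   }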
3154 #define CMS_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)    \
3155                                                                 \
3156 void ConcurrentMarkSweepGeneration::                            \
3157 oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) {   \
3158   cl->set_generation(this);                                     \
3159   cmsSpace()->oop_since_save_marks_iterate##nv_suffix(cl);      \
3160   cl->reset_generation();                                       \
3161   save_marks();                                                 \
3162 }
3163 
3164 ALL_SINCE_SAVE_MARKS_CLOSURES(CMS_SINCE_SAVE_MARKS_DEFN)
3165 
3166 void
3167 ConcurrentMarkSweepGeneration::younger_refs_iterate(OopsInGenClosure* cl) {
3168   cl->set_generation(this);
3169   younger_refs_in_space_iterate(_cmsSpace, cl);
3170   cl->reset_generation();
3171 }
3172 
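// The iterators below must hold the free list lock while walking the
// space. They can be reached both from paths that already own the lock
// (e.g. between gc_prologue and gc_epilogue) and from paths that do not,
// hence the owned_by_self() check before acquiring it.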
3173 void
3174 ConcurrentMarkSweepGeneration::oop_iterate(MemRegion mr, ExtendedOopClosure* cl) {
3175   if (freelistLock()->owned_by_self()) {
3176     Generation::oop_iterate(mr, cl);
3177   } else {
3178     MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
3179     Generation::oop_iterate(mr, cl);
3180   }
3181 }
3182 
3183 void
3184 ConcurrentMarkSweepGeneration::oop_iterate(ExtendedOopClosure* cl) {
3185   if (freelistLock()->owned_by_self()) {
3186     Generation::oop_iterate(cl);
3187   } else {
3188     MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
3189     Generation::oop_iterate(cl);
3190   }
3191 }
3192 
3193 void
3194 ConcurrentMarkSweepGeneration::object_iterate(ObjectClosure* cl) {
3195   if (freelistLock()->owned_by_self()) {
3196     Generation::object_iterate(cl);
3197   } else {
3198     MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
3199     Generation::object_iterate(cl);
3200   }
3201 }
3202 
3203 void
3204 ConcurrentMarkSweepGeneration::safe_object_iterate(ObjectClosure* cl) {
3205   if (freelistLock()->owned_by_self()) {
3206     Generation::safe_object_iterate(cl);
3207   } else {
3208     MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
3209     Generation::safe_object_iterate(cl);
3210   }
3211 }
3212 
3213 void
3214 ConcurrentMarkSweepGeneration::post_compact() {
3215 }
3216 
3217 void
3218 ConcurrentMarkSweepGeneration::prepare_for_verify() {
3219   // Fix the linear allocation blocks to look like free blocks.
3220 
3221   // Locks are normally acquired/released in gc_prologue/gc_epilogue, but those
3222   // are not called when the heap is verified during universe initialization and
3223   // at vm shutdown.
3224   if (freelistLock()->owned_by_self()) {
3225     cmsSpace()->prepare_for_verify();
3226   } else {
3227     MutexLockerEx fll(freelistLock(), Mutex::_no_safepoint_check_flag);
3228     cmsSpace()->prepare_for_verify();
3229   }
3230 }
3231 
3232 void
3233 ConcurrentMarkSweepGeneration::verify() {
3234   // Locks are normally acquired/released in gc_prologue/gc_epilogue, but those
3235   // are not called when the heap is verified during universe initialization and
3236   // at vm shutdown.
3237   if (freelistLock()->owned_by_self()) {
3238     cmsSpace()->verify();
3239   } else {
3240     MutexLockerEx fll(freelistLock(), Mutex::_no_safepoint_check_flag);
3241     cmsSpace()->verify();
3242   }
3243 }
3244 
3245 void CMSCollector::verify() {
3246   _cmsGen->verify();
3247 }
3248 
3249 #ifndef PRODUCT
3250 bool CMSCollector::overflow_list_is_empty() const {
3251   assert(_num_par_pushes >= 0, "Inconsistency");
3252   if (_overflow_list == NULL) {
3253     assert(_num_par_pushes == 0, "Inconsistency");
3254   }
3255   return _overflow_list == NULL;
3256 }
3257 
3258 // The methods verify_work_stacks_empty() and verify_overflow_empty()
3259 // merely consolidate assertion checks that appear to occur together frequently.
3260 void CMSCollector::verify_work_stacks_empty() const {
3261   assert(_markStack.isEmpty(), "Marking stack should be empty");
3262   assert(overflow_list_is_empty(), "Overflow list should be empty");
3263 }
3264 
3265 void CMSCollector::verify_overflow_empty() const {
3266   assert(overflow_list_is_empty(), "Overflow list should be empty");
3267   assert(no_preserved_marks(), "No preserved marks");
3268 }
3269 #endif // PRODUCT
3270 
3271 // Decide if we want to enable class unloading as part of the
3272 // ensuing concurrent GC cycle. We will collect and
3273 // unload classes if it's the case that:
3274 // (1) an explicit gc request has been made and the flag
3275 //     ExplicitGCInvokesConcurrentAndUnloadsClasses is set, OR
3276 // (2) (a) class unloading is enabled at the command line, and
3277 //     (b) either enough concurrent cycles have elapsed since
3278 //         classes were last unloaded, or the old gen is getting
3279 //         really full.
3280 // NOTE: Provided there is no change in the state of the heap between
3281 // calls to this method, it should have idempotent results. Moreover,
3282 // its results should be monotonically increasing (i.e. going from 0 to 1,
3283 // but not 1 to 0) between successive calls between which the heap was
3284 // not collected. For the implementation below, it must thus rely on
3285 // the property that concurrent_cycles_since_last_unload()
3286 // will not decrease unless a collection cycle happened, and that
3287 // _cmsGen->is_too_full() is itself also monotonic in that sense.
3288 void CMSCollector::update_should_unload_classes() {
3289   _should_unload_classes = false;
3290   // Condition 1 above
3291   if (_full_gc_requested && ExplicitGCInvokesConcurrentAndUnloadsClasses) {
3292     _should_unload_classes = true;
3293   } else if (CMSClassUnloadingEnabled) { // Condition 2.a above
3294     // Disjuncts 2.b above
3295     _should_unload_classes = (concurrent_cycles_since_last_unload() >=
3296                               CMSClassUnloadingMaxInterval)
3297                            || _cmsGen->is_too_full();
3298   }
3299 }
3300 
3301 bool ConcurrentMarkSweepGeneration::is_too_full() const {
3302   bool res = should_concurrent_collect();
3303   res = res && (occupancy() > (double)CMSIsTooFullPercentage/100.0);
3304   return res;
3305 }
3306 
3307 void CMSCollector::setup_cms_unloading_and_verification_state() {
3308   const  bool should_verify =   VerifyBeforeGC || VerifyAfterGC || VerifyDuringGC
3309                              || VerifyBeforeExit;
3310   const  int  rso           =   SharedHeap::SO_Strings | SharedHeap::SO_CodeCache;
3311 
3312   if (should_unload_classes()) {   // Should unload classes this cycle
3313     remove_root_scanning_option(rso);  // Shrink the root set appropriately
3314     set_verifying(should_verify);    // Set verification state for this cycle
3315     return;                            // Nothing else needs to be done at this time
3316   }
3317 
3318   // Not unloading classes this cycle
3319   assert(!should_unload_classes(), "Inconsistency!");
3320   if ((!verifying() || unloaded_classes_last_cycle()) && should_verify) {
3321     // Include symbols, strings and code cache elements to prevent their resurrection.
3322     add_root_scanning_option(rso);
3323     set_verifying(true);
3324   } else if (verifying() && !should_verify) {
3325     // We were verifying, but some verification flags got disabled.
3326     set_verifying(false);
3327     // Exclude symbols, strings and code cache elements from root scanning to
3328     // reduce IM and RM pauses.
3329     remove_root_scanning_option(rso);
3330   }
3331 }
3332 
3333 
3334 #ifndef PRODUCT
3335 HeapWord* CMSCollector::block_start(const void* p) const {
3336   const HeapWord* addr = (HeapWord*)p;
3337   if (_span.contains(p)) {
3338     if (_cmsGen->cmsSpace()->is_in_reserved(addr)) {
3339       return _cmsGen->cmsSpace()->block_start(p);
3340     }
3341   }
3342   return NULL;
3343 }
3344 #endif
3345 
3346 HeapWord*
3347 ConcurrentMarkSweepGeneration::expand_and_allocate(size_t word_size,
3348                                                    bool   tlab,
3349                                                    bool   parallel) {
3350   CMSSynchronousYieldRequest yr;
3351   assert(!tlab, "Can't deal with TLAB allocation");
3352   MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
3353   expand(word_size*HeapWordSize, MinHeapDeltaBytes,
3354     CMSExpansionCause::_satisfy_allocation);
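  // Sleeping here (and at the similar points below) widens the window
  // between expansion and allocation, presumably so that races with
  // competing allocators are easier to exercise in testing.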
3355   if (GCExpandToAllocateDelayMillis > 0) {
3356     os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
3357   }
3358   return have_lock_and_allocate(word_size, tlab);
3359 }
3360 
3361 // YSR: All of this generation expansion/shrinking stuff is an exact copy of
3362 // OneContigSpaceCardGeneration, which makes me wonder if we should move this
3363 // to CardGeneration and share it...
3364 bool ConcurrentMarkSweepGeneration::expand(size_t bytes, size_t expand_bytes) {
3365   return CardGeneration::expand(bytes, expand_bytes);
3366 }
3367 
3368 void ConcurrentMarkSweepGeneration::expand(size_t bytes, size_t expand_bytes,
3369   CMSExpansionCause::Cause cause)
3370 {
3371 
3372   bool success = expand(bytes, expand_bytes);
3373 
3374   // remember why we expanded; this information is used
3375   // by shouldConcurrentCollect() when making decisions on whether to start
3376   // a new CMS cycle.
3377   if (success) {
3378     set_expansion_cause(cause);
3379     if (PrintGCDetails && Verbose) {
3380       gclog_or_tty->print_cr("Expanded CMS gen for %s",
3381         CMSExpansionCause::to_string(cause));
3382     }
3383   }
3384 }
3385 
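// Expansion on behalf of a promotion LAB is expected to be rare, so it is
// serialized on ParGCRareEvent_lock: one worker expands while the others,
// as each acquires the lock in turn, first retry the allocation in case
// that expansion has already satisfied their request.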
3386 HeapWord* ConcurrentMarkSweepGeneration::expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz) {
3387   HeapWord* res = NULL;
3388   MutexLocker x(ParGCRareEvent_lock);
3389   while (true) {
3390     // Expansion by some other thread might make alloc OK now:
3391     res = ps->lab.alloc(word_sz);
3392     if (res != NULL) return res;
3393     // If there's not enough expansion space available, give up.
3394     if (_virtual_space.uncommitted_size() < (word_sz * HeapWordSize)) {
3395       return NULL;
3396     }
3397     // Otherwise, we try expansion.
3398     expand(word_sz*HeapWordSize, MinHeapDeltaBytes,
3399       CMSExpansionCause::_allocate_par_lab);
3400     // Now go around the loop and try alloc again;
3401     // A competing par_promote might beat us to the expansion space,
3402     // so we may go around the loop again if allocation fails again.
3403     if (GCExpandToAllocateDelayMillis > 0) {
3404       os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
3405     }
3406   }
3407 }
3408 
3409 
3410 bool ConcurrentMarkSweepGeneration::expand_and_ensure_spooling_space(
3411   PromotionInfo* promo) {
3412   MutexLocker x(ParGCRareEvent_lock);
3413   size_t refill_size_bytes = promo->refillSize() * HeapWordSize;
3414   while (true) {
3415     // Expansion by some other thread might make alloc OK now:
3416     if (promo->ensure_spooling_space()) {
3417       assert(promo->has_spooling_space(),
3418              "Post-condition of successful ensure_spooling_space()");
3419       return true;
3420     }
3421     // If there's not enough expansion space available, give up.
3422     if (_virtual_space.uncommitted_size() < refill_size_bytes) {
3423       return false;
3424     }
3425     // Otherwise, we try expansion.
3426     expand(refill_size_bytes, MinHeapDeltaBytes,
3427       CMSExpansionCause::_allocate_par_spooling_space);
3428     // Now go around the loop and try alloc again;
3429     // A competing allocation might beat us to the expansion space,
3430     // so we may go around the loop again if allocation fails again.
3431     if (GCExpandToAllocateDelayMillis > 0) {
3432       os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
3433     }
3434   }
3435 }
3436 
3437 
3438 void ConcurrentMarkSweepGeneration::shrink_by(size_t bytes) {
3439   assert_locked_or_safepoint(ExpandHeap_lock);
3440   // Shrink committed space
3441   _virtual_space.shrink_by(bytes);
3442   // Shrink space; this also shrinks the space's BOT
3443   _cmsSpace->set_end((HeapWord*) _virtual_space.high());
3444   size_t new_word_size = heap_word_size(_cmsSpace->capacity());
3445   // Shrink the shared block offset array
3446   _bts->resize(new_word_size);
3447   MemRegion mr(_cmsSpace->bottom(), new_word_size);
3448   // Shrink the card table
3449   Universe::heap()->barrier_set()->resize_covered_region(mr);
3450 
3451   if (Verbose && PrintGC) {
3452     size_t new_mem_size = _virtual_space.committed_size();
3453     size_t old_mem_size = new_mem_size + bytes;
3454     gclog_or_tty->print_cr("Shrinking %s from " SIZE_FORMAT "K to " SIZE_FORMAT "K",
3455                   name(), old_mem_size/K, new_mem_size/K);
3456   }
3457 }
3458 
3459 void ConcurrentMarkSweepGeneration::shrink(size_t bytes) {
3460   assert_locked_or_safepoint(Heap_lock);
3461   size_t size = ReservedSpace::page_align_size_down(bytes);
3462   if (size > 0) {
3463     shrink_by(size);
3464   }
3465 }
3466 
3467 bool ConcurrentMarkSweepGeneration::grow_by(size_t bytes) {
3468   assert_locked_or_safepoint(Heap_lock);
3469   bool result = _virtual_space.expand_by(bytes);
3470   if (result) {
3471     size_t new_word_size =
3472       heap_word_size(_virtual_space.committed_size());
3473     MemRegion mr(_cmsSpace->bottom(), new_word_size);
3474     _bts->resize(new_word_size);  // resize the block offset shared array
3475     Universe::heap()->barrier_set()->resize_covered_region(mr);
3476     // Hmmmm... why doesn't CFLS::set_end verify locking?
3477     // This is quite ugly; FIX ME XXX
3478     _cmsSpace->assert_locked(freelistLock());
3479     _cmsSpace->set_end((HeapWord*)_virtual_space.high());
3480 
3481     // update the space and generation capacity counters
3482     if (UsePerfData) {
3483       _space_counters->update_capacity();
3484       _gen_counters->update_all();
3485     }
3486 
3487     if (Verbose && PrintGC) {
3488       size_t new_mem_size = _virtual_space.committed_size();
3489       size_t old_mem_size = new_mem_size - bytes;
3490       gclog_or_tty->print_cr("Expanding %s from " SIZE_FORMAT "K by " SIZE_FORMAT "K to " SIZE_FORMAT "K",
3491                     name(), old_mem_size/K, bytes/K, new_mem_size/K);
3492     }
3493   }
3494   return result;
3495 }
3496 
3497 bool ConcurrentMarkSweepGeneration::grow_to_reserved() {
3498   assert_locked_or_safepoint(Heap_lock);
3499   bool success = true;
3500   const size_t remaining_bytes = _virtual_space.uncommitted_size();
3501   if (remaining_bytes > 0) {
3502     success = grow_by(remaining_bytes);
3503     DEBUG_ONLY(if (!success) warning("grow to reserved failed");)
3504   }
3505   return success;
3506 }
3507 
3508 void ConcurrentMarkSweepGeneration::shrink_free_list_by(size_t bytes) {
3509   assert_locked_or_safepoint(Heap_lock);
3510   assert_lock_strong(freelistLock());
3511   if (PrintGCDetails && Verbose) {
3512     warning("Shrinking of CMS not yet implemented");
3513   }
3514   return;
3515 }
3516 
3517 
3518 // Simple ctor/dtor wrapper for accounting & timer chores around concurrent
3519 // phases.
3520 class CMSPhaseAccounting: public StackObj {
3521  public:
3522   CMSPhaseAccounting(CMSCollector *collector,
3523                      const char *phase,
3524                      bool print_cr = true);
3525   ~CMSPhaseAccounting();
3526 
3527  private:
3528   CMSCollector *_collector;
3529   const char *_phase;
3530   elapsedTimer _wallclock;
3531   bool _print_cr;
3532 
3533  public:
3534   // Not MT-safe, so do not pass these StackObjs around
3535   // where they may be accessed by other threads.
3536   jlong wallclock_millis() {
3537     assert(_wallclock.is_active(), "Wall clock should not stop");
3538     _wallclock.stop();  // to record time
3539     jlong ret = _wallclock.milliseconds();
3540     _wallclock.start(); // restart
3541     return ret;
3542   }
3543 };
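// Typical use, as in markFromRoots() below: construct one of these on the
// stack for the duration of a concurrent phase, e.g.
//
//   CMSPhaseAccounting pa(this, "mark", !PrintGCDetails);
//
// The constructor logs the "concurrent-<phase>-start" line and starts the
// collector's timer; the destructor stops the timers and logs the
// accumulated collector time and the wall clock time of the phase.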
3544 
3545 CMSPhaseAccounting::CMSPhaseAccounting(CMSCollector *collector,
3546                                        const char *phase,
3547                                        bool print_cr) :
3548   _collector(collector), _phase(phase), _print_cr(print_cr) {
3549 
3550   if (PrintCMSStatistics != 0) {
3551     _collector->resetYields();
3552   }
3553   if (PrintGCDetails) {
3554     gclog_or_tty->date_stamp(PrintGCDateStamps);
3555     gclog_or_tty->stamp(PrintGCTimeStamps);
3556     gclog_or_tty->print_cr("[%s-concurrent-%s-start]",
3557       _collector->cmsGen()->short_name(), _phase);
3558   }
3559   _collector->resetTimer();
3560   _wallclock.start();
3561   _collector->startTimer();
3562 }
3563 
3564 CMSPhaseAccounting::~CMSPhaseAccounting() {
3565   assert(_wallclock.is_active(), "Wall clock should not have stopped");
3566   _collector->stopTimer();
3567   _wallclock.stop();
3568   if (PrintGCDetails) {
3569     gclog_or_tty->date_stamp(PrintGCDateStamps);
3570     gclog_or_tty->stamp(PrintGCTimeStamps);
3571     gclog_or_tty->print("[%s-concurrent-%s: %3.3f/%3.3f secs]",
3572                  _collector->cmsGen()->short_name(),
3573                  _phase, _collector->timerValue(), _wallclock.seconds());
3574     if (_print_cr) {
3575       gclog_or_tty->print_cr("");
3576     }
3577     if (PrintCMSStatistics != 0) {
3578       gclog_or_tty->print_cr(" (CMS-concurrent-%s yielded %d times)", _phase,
3579                     _collector->yields());
3580     }
3581   }
3582 }
3583 
3584 // CMS work
3585 
3586 // The common parts of CMSParInitialMarkTask and CMSParRemarkTask.
3587 class CMSParMarkTask : public AbstractGangTask {
3588  protected:
3589   CMSCollector*     _collector;
3590   int               _n_workers;
3591   CMSParMarkTask(const char* name, CMSCollector* collector, int n_workers) :
3592       AbstractGangTask(name),
3593       _collector(collector),
3594       _n_workers(n_workers) {}
3595   // Work method in support of parallel rescan ... of young gen spaces
3596   void do_young_space_rescan(uint worker_id, OopsInGenClosure* cl,
3597                              ContiguousSpace* space,
3598                              HeapWord** chunk_array, size_t chunk_top);
3599   void work_on_young_gen_roots(uint worker_id, OopsInGenClosure* cl);
3600 };
3601 
3602 // Parallel initial mark task
3603 class CMSParInitialMarkTask: public CMSParMarkTask {
3604  public:
3605   CMSParInitialMarkTask(CMSCollector* collector, int n_workers) :
3606       CMSParMarkTask("Scan roots and young gen for initial mark in parallel",
3607                      collector, n_workers) {}
3608   void work(uint worker_id);
3609 };
3610 
3611 // Checkpoint the roots into this generation from outside
3612 // this generation. [Note this initial checkpoint need only
3613 // be approximate -- we'll do a catch up phase subsequently.]
3614 void CMSCollector::checkpointRootsInitial(bool asynch) {
3615   assert(_collectorState == InitialMarking, "Wrong collector state");
3616   check_correct_thread_executing();
3617   TraceCMSMemoryManagerStats tms(_collectorState,GenCollectedHeap::heap()->gc_cause());
3618 
3619   save_heap_summary();
3620   report_heap_summary(GCWhen::BeforeGC);
3621 
3622   ReferenceProcessor* rp = ref_processor();
3623   SpecializationStats::clear();
3624   assert(_restart_addr == NULL, "Control point invariant");
3625   if (asynch) {
3626     // acquire locks for subsequent manipulations
3627     MutexLockerEx x(bitMapLock(),
3628                     Mutex::_no_safepoint_check_flag);
3629     checkpointRootsInitialWork(asynch);
3630     // enable ("weak") refs discovery
3631     rp->enable_discovery(true /*verify_disabled*/, true /*check_no_refs*/);
3632     _collectorState = Marking;
3633   } else {
3634     // (Weak) Refs discovery: this is controlled from genCollectedHeap::do_collection
3635     // which recognizes if we are a CMS generation, and doesn't try to turn on
3636     // discovery; verify that they aren't meddling.
3637     assert(!rp->discovery_is_atomic(),
3638            "incorrect setting of discovery predicate");
3639     assert(!rp->discovery_enabled(), "genCollectedHeap shouldn't control "
3640            "ref discovery for this generation kind");
3641     // already have locks
3642     checkpointRootsInitialWork(asynch);
3643     // now enable ("weak") refs discovery
3644     rp->enable_discovery(true /*verify_disabled*/, false /*verify_no_refs*/);
3645     _collectorState = Marking;
3646   }
3647   SpecializationStats::print();
3648 }
3649 
3650 void CMSCollector::checkpointRootsInitialWork(bool asynch) {
3651   assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped");
3652   assert(_collectorState == InitialMarking, "just checking");
3653 
3654   // If there has not been a GC[n-1] since last GC[n] cycle completed,
3655   // precede our marking with a collection of all
3656   // younger generations to keep floating garbage to a minimum.
3657   // XXX: we won't do this for now -- it's an optimization to be done later.
3658 
3659   // already have locks
3660   assert_lock_strong(bitMapLock());
3661   assert(_markBitMap.isAllClear(), "was reset at end of previous cycle");
3662 
3663   // Setup the verification and class unloading state for this
3664   // CMS collection cycle.
3665   setup_cms_unloading_and_verification_state();
3666 
3667   NOT_PRODUCT(GCTraceTime t("\ncheckpointRootsInitialWork",
3668     PrintGCDetails && Verbose, true, _gc_timer_cm);)
3669   if (UseAdaptiveSizePolicy) {
3670     size_policy()->checkpoint_roots_initial_begin();
3671   }
3672 
3673   // Reset all the PLAB chunk arrays if necessary.
3674   if (_survivor_plab_array != NULL && !CMSPLABRecordAlways) {
3675     reset_survivor_plab_arrays();
3676   }
3677 
3678   ResourceMark rm;
3679   HandleMark  hm;
3680 
3681   FalseClosure falseClosure;
3682   // In the case of a synchronous collection, we will elide the
3683   // remark step, so it's important to catch all the nmethod oops
3684   // in this step.
3685   // The final 'true' flag to gen_process_strong_roots will ensure this.
3686   // If 'async' is true, we can relax the nmethod tracing.
3687   MarkRefsIntoClosure notOlder(_span, &_markBitMap);
3688   GenCollectedHeap* gch = GenCollectedHeap::heap();
3689 
3690   verify_work_stacks_empty();
3691   verify_overflow_empty();
3692 
3693   gch->ensure_parsability(false);  // fill TLABs, but no need to retire them
3694   // Update the saved marks which may affect the root scans.
3695   gch->save_marks();
3696 
3697   // weak reference processing has not started yet.
3698   ref_processor()->set_enqueuing_is_done(false);
3699 
3700   // Need to remember all newly created CLDs,
3701   // so that we can guarantee that the remark finds them.
3702   ClassLoaderDataGraph::remember_new_clds(true);
3703 
3704   // Whenever a CLD is found, it will be claimed before proceeding to mark
3705   // the klasses. The claimed marks need to be cleared before marking starts.
3706   ClassLoaderDataGraph::clear_claimed_marks();
3707 
3708   if (CMSPrintEdenSurvivorChunks) {
3709     print_eden_and_survivor_chunk_arrays();
3710   }
3711 
3712   {
3713     COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
3714     if (CMSParallelInitialMarkEnabled && CollectedHeap::use_parallel_gc_threads()) {
3715       // The parallel version.
3716       FlexibleWorkGang* workers = gch->workers();
3717       assert(workers != NULL, "Need parallel worker threads.");
3718       int n_workers = workers->active_workers();
3719       CMSParInitialMarkTask tsk(this, n_workers);
3720       gch->set_par_threads(n_workers);
3721       initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
3722       if (n_workers > 1) {
3723         GenCollectedHeap::StrongRootsScope srs(gch);
3724         workers->run_task(&tsk);
3725       } else {
3726         GenCollectedHeap::StrongRootsScope srs(gch);
3727         tsk.work(0);
3728       }
3729       gch->set_par_threads(0);
3730     } else {
3731       // The serial version.
3732       CMKlassClosure klass_closure(&notOlder);
3733       gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
3734       gch->gen_process_strong_roots(_cmsGen->level(),
3735                                     true,   // younger gens are roots
3736                                     true,   // activate StrongRootsScope
3737                                     false,  // not scavenging
3738                                     SharedHeap::ScanningOption(roots_scanning_options()),
3739                                     &notOlder,
3740                                     true,   // walk all of code cache if (so & SO_CodeCache)
3741                                     NULL,
3742                                     &klass_closure);
3743     }
3744   }
3745 
3746   // Clear mod-union table; it will be dirtied in the prologue of
3747   // CMS generation per each younger generation collection.
3748 
3749   assert(_modUnionTable.isAllClear(),
3750        "Was cleared in most recent final checkpoint phase"
3751        " or no bits are set in the gc_prologue before the start of the"
3752        " next marking phase.");
3753 
3754   assert(_ct->klass_rem_set()->mod_union_is_clear(), "Must be");
3755 
3756   // Save the end of the used_region of the constituent generations
3757   // to be used to limit the extent of sweep in each generation.
3758   save_sweep_limits();
3759   if (UseAdaptiveSizePolicy) {
3760     size_policy()->checkpoint_roots_initial_end(gch->gc_cause());
3761   }
3762   verify_overflow_empty();
3763 }
3764 
3765 bool CMSCollector::markFromRoots(bool asynch) {
3766   // we might be tempted to assert that:
3767   // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
3768   //        "inconsistent argument?");
3769   // However that wouldn't be right, because it's possible that
3770   // a safepoint is indeed in progress as a younger generation
3771   // stop-the-world GC happens even as we mark in this generation.
3772   assert(_collectorState == Marking, "inconsistent state?");
3773   check_correct_thread_executing();
3774   verify_overflow_empty();
3775 
3776   bool res;
3777   if (asynch) {
3778 
3779     // Start the timers for adaptive size policy for the concurrent phases
3780     // Do it here so that the foreground MS can use the concurrent
3781     // timer since a foreground MS might have the sweep done concurrently
3782     // or STW.
3783     if (UseAdaptiveSizePolicy) {
3784       size_policy()->concurrent_marking_begin();
3785     }
3786 
3787     // Weak ref discovery note: We may be discovering weak
3788     // refs in this generation concurrently (but interleaved) with
3789     // weak ref discovery by a younger generation collector.
3790 
3791     CMSTokenSyncWithLocks ts(true, bitMapLock());
3792     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
3793     CMSPhaseAccounting pa(this, "mark", !PrintGCDetails);
3794     res = markFromRootsWork(asynch);
3795     if (res) {
3796       _collectorState = Precleaning;
3797     } else { // We failed and a foreground collection wants to take over
3798       assert(_foregroundGCIsActive, "internal state inconsistency");
3799       assert(_restart_addr == NULL,  "foreground will restart from scratch");
3800       if (PrintGCDetails) {
3801         gclog_or_tty->print_cr("bailing out to foreground collection");
3802       }
3803     }
3804     if (UseAdaptiveSizePolicy) {
3805       size_policy()->concurrent_marking_end();
3806     }
3807   } else {
3808     assert(SafepointSynchronize::is_at_safepoint(),
3809            "inconsistent with asynch == false");
3810     if (UseAdaptiveSizePolicy) {
3811       size_policy()->ms_collection_marking_begin();
3812     }
3813     // already have locks
3814     res = markFromRootsWork(asynch);
3815     _collectorState = FinalMarking;
3816     if (UseAdaptiveSizePolicy) {
3817       GenCollectedHeap* gch = GenCollectedHeap::heap();
3818       size_policy()->ms_collection_marking_end(gch->gc_cause());
3819     }
3820   }
3821   verify_overflow_empty();
3822   return res;
3823 }
3824 
3825 bool CMSCollector::markFromRootsWork(bool asynch) {
3826   // iterate over marked bits in bit map, doing a full scan and mark
3827   // from these roots using the following algorithm:
3828   // . if oop is to the right of the current scan pointer,
3829   //   mark corresponding bit (we'll process it later)
3830   // . else (oop is to left of current scan pointer)
3831   //   push oop on marking stack
3832   // . drain the marking stack
3833 
3834   // Note that when we do a marking step we need to hold the
3835   // bit map lock -- recall that direct allocation (by mutators)
3836   // and promotion (by younger generation collectors) is also
3837   // marking the bit map. [the so-called allocate live policy.]
3838   // Because the implementation of bit map marking is not
3839   // robust wrt simultaneous marking of bits in the same word,
3840   // we need to make sure that there is no such interference
3841   // between concurrent such updates.
3842 
3843   // already have locks
3844   assert_lock_strong(bitMapLock());
3845 
3846   verify_work_stacks_empty();
3847   verify_overflow_empty();
3848   bool result = false;
3849   if (CMSConcurrentMTEnabled && ConcGCThreads > 0) {
3850     result = do_marking_mt(asynch);
3851   } else {
3852     result = do_marking_st(asynch);
3853   }
3854   return result;
3855 }
3856 
3857 // Forward decl
3858 class CMSConcMarkingTask;
3859 
3860 class CMSConcMarkingTerminator: public ParallelTaskTerminator {
3861   CMSCollector*       _collector;
3862   CMSConcMarkingTask* _task;
3863  public:
3864   virtual void yield();
3865 
3866   // "n_threads" is the number of threads to be terminated.
3867   // "queue_set" is a set of work queues of other threads.
3868   // "collector" is the CMS collector associated with this task terminator.
3869   // "yield" indicates whether we need the gang as a whole to yield.
3870   CMSConcMarkingTerminator(int n_threads, TaskQueueSetSuper* queue_set, CMSCollector* collector) :
3871     ParallelTaskTerminator(n_threads, queue_set),
3872     _collector(collector) { }
3873 
3874   void set_task(CMSConcMarkingTask* task) {
3875     _task = task;
3876   }
3877 };
3878 
3879 class CMSConcMarkingTerminatorTerminator: public TerminatorTerminator {
3880   CMSConcMarkingTask* _task;
3881  public:
3882   bool should_exit_termination();
3883   void set_task(CMSConcMarkingTask* task) {
3884     _task = task;
3885   }
3886 };
3887 
3888 // MT Concurrent Marking Task
3889 class CMSConcMarkingTask: public YieldingFlexibleGangTask {
3890   CMSCollector* _collector;
3891   int           _n_workers;                  // requested/desired # workers
3892   bool          _asynch;
3893   bool          _result;
3894   CompactibleFreeListSpace*  _cms_space;
3895   char          _pad_front[64];   // padding to ...
3896   HeapWord*     _global_finger;   // ... avoid sharing cache line
3897   char          _pad_back[64];
3898   HeapWord*     _restart_addr;
3899 
3900   //  Exposed here for yielding support
3901   Mutex* const _bit_map_lock;
3902 
3903   // The per thread work queues, available here for stealing
3904   OopTaskQueueSet*  _task_queues;
3905 
3906   // Termination (and yielding) support
3907   CMSConcMarkingTerminator _term;
3908   CMSConcMarkingTerminatorTerminator _term_term;
3909 
3910  public:
3911   CMSConcMarkingTask(CMSCollector* collector,
3912                  CompactibleFreeListSpace* cms_space,
3913                  bool asynch,
3914                  YieldingFlexibleWorkGang* workers,
3915                  OopTaskQueueSet* task_queues):
3916     YieldingFlexibleGangTask("Concurrent marking done multi-threaded"),
3917     _collector(collector),
3918     _cms_space(cms_space),
3919     _asynch(asynch), _n_workers(0), _result(true),
3920     _task_queues(task_queues),
3921     _term(_n_workers, task_queues, _collector),
3922     _bit_map_lock(collector->bitMapLock())
3923   {
3924     _requested_size = _n_workers;
3925     _term.set_task(this);
3926     _term_term.set_task(this);
3927     _restart_addr = _global_finger = _cms_space->bottom();
3928   }
3929 
3930 
3931   OopTaskQueueSet* task_queues()  { return _task_queues; }
3932 
3933   OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
3934 
3935   HeapWord** global_finger_addr() { return &_global_finger; }
3936 
3937   CMSConcMarkingTerminator* terminator() { return &_term; }
3938 
3939   virtual void set_for_termination(int active_workers) {
3940     terminator()->reset_for_reuse(active_workers);
3941   }
3942 
3943   void work(uint worker_id);
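  // Yield only during an asynchronous (concurrent) collection, only when
  // the CMS thread has been asked to yield, and never once a foreground
  // collection has taken over.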
3944   bool should_yield() {
3945     return    ConcurrentMarkSweepThread::should_yield()
3946            && !_collector->foregroundGCIsActive()
3947            && _asynch;
3948   }
3949 
3950   virtual void coordinator_yield();  // stuff done by coordinator
3951   bool result() { return _result; }
3952 
3953   void reset(HeapWord* ra) {
3954     assert(_global_finger >= _cms_space->end(),  "Postcondition of ::work(i)");
3955     _restart_addr = _global_finger = ra;
3956     _term.reset_for_reuse();
3957   }
3958 
3959   static bool get_work_from_overflow_stack(CMSMarkStack* ovflw_stk,
3960                                            OopTaskQueue* work_q);
3961 
3962  private:
3963   void do_scan_and_mark(int i, CompactibleFreeListSpace* sp);
3964   void do_work_steal(int i);
3965   void bump_global_finger(HeapWord* f);
3966 };
3967 
3968 bool CMSConcMarkingTerminatorTerminator::should_exit_termination() {
3969   assert(_task != NULL, "Error");
3970   return _task->yielding();
3971   // Note that we do not need the disjunct || _task->should_yield() above
3972   // because we want terminating threads to yield only if the task
3973   // is already in the midst of yielding, which happens only after at least one
3974   // thread has yielded.
3975 }
3976 
3977 void CMSConcMarkingTerminator::yield() {
3978   if (_task->should_yield()) {
3979     _task->yield();
3980   } else {
3981     ParallelTaskTerminator::yield();
3982   }
3983 }
3984 
3985 ////////////////////////////////////////////////////////////////
3986 // Concurrent Marking Algorithm Sketch
3987 ////////////////////////////////////////////////////////////////
3988 // Until all tasks exhausted (both spaces):
3989 // -- claim next available chunk
3990 // -- bump global finger via CAS
3991 // -- find first object that starts in this chunk
3992 //    and start scanning bitmap from that position
3993 // -- scan marked objects for oops
3994 // -- CAS-mark target, and if successful:
3995 //    . if target oop is above global finger (volatile read)
3996 //      nothing to do
3997 //    . if target oop is in chunk and above local finger
3998 //        then nothing to do
3999 //    . else push on work-queue
4000 // -- Deal with possible overflow issues:
4001 //    . local work-queue overflow causes stuff to be pushed on
4002 //      global (common) overflow queue
4003 //    . always first empty local work queue
4004 //    . then get a batch of oops from global work queue if any
4005 //    . then do work stealing
4006 // -- When all tasks claimed (both spaces)
4007 //    and local work queue empty,
4008 //    then in a loop do:
4009 //    . check global overflow stack; steal a batch of oops and trace
//    . try to steal from other threads if the GOS (global overflow stack) is empty
4011 //    . if neither is available, offer termination
4012 // -- Terminate and return result
4013 //
4014 void CMSConcMarkingTask::work(uint worker_id) {
4015   elapsedTimer _timer;
4016   ResourceMark rm;
4017   HandleMark hm;
4018 
4019   DEBUG_ONLY(_collector->verify_overflow_empty();)
4020 
4021   // Before we begin work, our work queue should be empty
4022   assert(work_queue(worker_id)->size() == 0, "Expected to be empty");
4023   // Scan the bitmap covering _cms_space, tracing through grey objects.
4024   _timer.start();
4025   do_scan_and_mark(worker_id, _cms_space);
4026   _timer.stop();
4027   if (PrintCMSStatistics != 0) {
4028     gclog_or_tty->print_cr("Finished cms space scanning in %dth thread: %3.3f sec",
4029       worker_id, _timer.seconds());
4030       // XXX: need xxx/xxx type of notation, two timers
4031   }
4032 
4033   // ... do work stealing
4034   _timer.reset();
4035   _timer.start();
4036   do_work_steal(worker_id);
4037   _timer.stop();
4038   if (PrintCMSStatistics != 0) {
4039     gclog_or_tty->print_cr("Finished work stealing in %dth thread: %3.3f sec",
4040       worker_id, _timer.seconds());
4041       // XXX: need xxx/xxx type of notation, two timers
4042   }
4043   assert(_collector->_markStack.isEmpty(), "Should have been emptied");
4044   assert(work_queue(worker_id)->size() == 0, "Should have been emptied");
4045   // Note that under the current task protocol, the
  // following assertion is true even if the spaces
4047   // expanded since the completion of the concurrent
4048   // marking. XXX This will likely change under a strict
4049   // ABORT semantics.
4050   // After perm removal the comparison was changed to
4051   // greater than or equal to from strictly greater than.
4052   // Before perm removal the highest address sweep would
4053   // have been at the end of perm gen but now is at the
4054   // end of the tenured gen.
  assert(_global_finger >= _cms_space->end(),
4056          "All tasks have been completed");
4057   DEBUG_ONLY(_collector->verify_overflow_empty();)
4058 }
4059 
4060 void CMSConcMarkingTask::bump_global_finger(HeapWord* f) {
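  // Advance _global_finger monotonically to at least f via a CAS loop.
  // Several workers may race here; a failed CAS re-reads the published
  // finger and retries only while f is still ahead of it, so the finger
  // never moves backward.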
4061   HeapWord* read = _global_finger;
4062   HeapWord* cur  = read;
4063   while (f > read) {
4064     cur = read;
4065     read = (HeapWord*) Atomic::cmpxchg_ptr(f, &_global_finger, cur);
4066     if (cur == read) {
4067       // our cas succeeded
4068       assert(_global_finger >= f, "protocol consistency");
4069       break;
4070     }
4071   }
4072 }
4073 
4074 // This is really inefficient, and should be redone by
4075 // using (not yet available) block-read and -write interfaces to the
4076 // stack and the work_queue. XXX FIX ME !!!
4077 bool CMSConcMarkingTask::get_work_from_overflow_stack(CMSMarkStack* ovflw_stk,
4078                                                       OopTaskQueue* work_q) {
4079   // Fast lock-free check
4080   if (ovflw_stk->length() == 0) {
4081     return false;
4082   }
4083   assert(work_q->size() == 0, "Shouldn't steal");
4084   MutexLockerEx ml(ovflw_stk->par_lock(),
4085                    Mutex::_no_safepoint_check_flag);
  // Grab up to 1/4 of the remaining capacity of the work queue
4087   size_t num = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
4088                     (size_t)ParGCDesiredObjsFromOverflowList);
4089   num = MIN2(num, ovflw_stk->length());
4090   for (int i = (int) num; i > 0; i--) {
4091     oop cur = ovflw_stk->pop();
4092     assert(cur != NULL, "Counted wrong?");
4093     work_q->push(cur);
4094   }
4095   return num > 0;
4096 }
4097 
4098 void CMSConcMarkingTask::do_scan_and_mark(int i, CompactibleFreeListSpace* sp) {
4099   SequentialSubTasksDone* pst = sp->conc_par_seq_tasks();
4100   int n_tasks = pst->n_tasks();
4101   // We allow that there may be no tasks to do here because
4102   // we are restarting after a stack overflow.
4103   assert(pst->valid() || n_tasks == 0, "Uninitialized use?");
4104   uint nth_task = 0;
4105 
4106   HeapWord* aligned_start = sp->bottom();
4107   if (sp->used_region().contains(_restart_addr)) {
4108     // Align down to a card boundary for the start of 0th task
4109     // for this space.
4110     aligned_start =
4111       (HeapWord*)align_size_down((uintptr_t)_restart_addr,
4112                                  CardTableModRefBS::card_size);
4113   }
4114 
4115   size_t chunk_size = sp->marking_task_size();
4116   while (!pst->is_task_claimed(/* reference */ nth_task)) {
4117     // Having claimed the nth task in this space,
4118     // compute the chunk that it corresponds to:
4119     MemRegion span = MemRegion(aligned_start + nth_task*chunk_size,
4120                                aligned_start + (nth_task+1)*chunk_size);
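    // (For nth_task == k this is the half-open interval
    // [aligned_start + k*chunk_size, aligned_start + (k+1)*chunk_size).)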
4121     // Try and bump the global finger via a CAS;
4122     // note that we need to do the global finger bump
4123     // _before_ taking the intersection below, because
4124     // the task corresponding to that region will be
4125     // deemed done even if the used_region() expands
4126     // because of allocation -- as it almost certainly will
4127     // during start-up while the threads yield in the
4128     // closure below.
4129     HeapWord* finger = span.end();
4130     bump_global_finger(finger);   // atomically
4131     // There are null tasks here corresponding to chunks
4132     // beyond the "top" address of the space.
4133     span = span.intersection(sp->used_region());
4134     if (!span.is_empty()) {  // Non-null task
4135       HeapWord* prev_obj;
4136       assert(!span.contains(_restart_addr) || nth_task == 0,
4137              "Inconsistency");
4138       if (nth_task == 0) {
4139         // For the 0th task, we'll not need to compute a block_start.
4140         if (span.contains(_restart_addr)) {
4141           // In the case of a restart because of stack overflow,
4142           // we might additionally skip a chunk prefix.
4143           prev_obj = _restart_addr;
4144         } else {
4145           prev_obj = span.start();
4146         }
4147       } else {
4148         // We want to skip the first object because
4149         // the protocol is to scan any object in its entirety
4150         // that _starts_ in this span; a fortiori, any
4151         // object starting in an earlier span is scanned
4152         // as part of an earlier claimed task.
4153         // Below we use the "careful" version of block_start
4154         // so we do not try to navigate uninitialized objects.
4155         prev_obj = sp->block_start_careful(span.start());
4156         // Below we use a variant of block_size that uses the
4157         // Printezis bits to avoid waiting for allocated
4158         // objects to become initialized/parsable.
4159         while (prev_obj < span.start()) {
4160           size_t sz = sp->block_size_no_stall(prev_obj, _collector);
4161           if (sz > 0) {
4162             prev_obj += sz;
4163           } else {
4164             // In this case we may end up doing a bit of redundant
4165             // scanning, but that appears unavoidable, short of
4166             // locking the free list locks; see bug 6324141.
4167             break;
4168           }
4169         }
4170       }
4171       if (prev_obj < span.end()) {
4172         MemRegion my_span = MemRegion(prev_obj, span.end());
4173         // Do the marking work within a non-empty span --
4174         // the last argument to the constructor indicates whether the
4175         // iteration should be incremental with periodic yields.
4176         Par_MarkFromRootsClosure cl(this, _collector, my_span,
4177                                     &_collector->_markBitMap,
4178                                     work_queue(i),
4179                                     &_collector->_markStack,
4180                                     _asynch);
4181         _collector->_markBitMap.iterate(&cl, my_span.start(), my_span.end());
4182       } // else nothing to do for this task
4183     }   // else nothing to do for this task
4184   }
4185   // We'd be tempted to assert here that since there are no
4186   // more tasks left to claim in this space, the global_finger
4187   // must exceed space->top() and a fortiori space->end(). However,
4188   // that would not quite be correct because the bumping of
4189   // global_finger occurs strictly after the claiming of a task,
4190   // so by the time we reach here the global finger may not yet
4191   // have been bumped up by the thread that claimed the last
4192   // task.
4193   pst->all_tasks_completed();
4194 }
4195 
4196 class Par_ConcMarkingClosure: public CMSOopClosure {
4197  private:
4198   CMSCollector* _collector;
4199   CMSConcMarkingTask* _task;
4200   MemRegion     _span;
4201   CMSBitMap*    _bit_map;
4202   CMSMarkStack* _overflow_stack;
4203   OopTaskQueue* _work_queue;
4204  protected:
4205   DO_OOP_WORK_DEFN
4206  public:
4207   Par_ConcMarkingClosure(CMSCollector* collector, CMSConcMarkingTask* task, OopTaskQueue* work_queue,
4208                          CMSBitMap* bit_map, CMSMarkStack* overflow_stack):
4209     CMSOopClosure(collector->ref_processor()),
4210     _collector(collector),
4211     _task(task),
4212     _span(collector->_span),
4213     _work_queue(work_queue),
4214     _bit_map(bit_map),
4215     _overflow_stack(overflow_stack)
4216   { }
4217   virtual void do_oop(oop* p);
4218   virtual void do_oop(narrowOop* p);
4219 
4220   void trim_queue(size_t max);
4221   void handle_stack_overflow(HeapWord* lost);
4222   void do_yield_check() {
4223     if (_task->should_yield()) {
4224       _task->yield();
4225     }
4226   }
4227 };
4228 
4229 // Grey object scanning during work stealing phase --
4230 // the salient assumption here is that any references
4231 // that are in these stolen objects being scanned must
4232 // already have been initialized (else they would not have
4233 // been published), so we do not need to check for
4234 // uninitialized objects before pushing here.
4235 void Par_ConcMarkingClosure::do_oop(oop obj) {
4236   assert(obj->is_oop_or_null(true), "expected an oop or NULL");
4237   HeapWord* addr = (HeapWord*)obj;
4238   // Check if oop points into the CMS generation
4239   // and is not marked
4240   if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
4241     // a white object ...
4242     // If we manage to "claim" the object, by being the
4243     // first thread to mark it, then we push it on our
4244     // marking stack
4245     if (_bit_map->par_mark(addr)) {     // ... now grey
4246       // push on work queue (grey set)
4247       bool simulate_overflow = false;
4248       NOT_PRODUCT(
4249         if (CMSMarkStackOverflowALot &&
4250             _collector->simulate_overflow()) {
4251           // simulate a stack overflow
4252           simulate_overflow = true;
4253         }
4254       )
4255       if (simulate_overflow ||
4256           !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
4257         // stack overflow
4258         if (PrintCMSStatistics != 0) {
4259           gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
4260                                  SIZE_FORMAT, _overflow_stack->capacity());
4261         }
4262         // We cannot assert that the overflow stack is full because
4263         // it may have been emptied since.
4264         assert(simulate_overflow ||
4265                _work_queue->size() == _work_queue->max_elems(),
4266               "Else push should have succeeded");
4267         handle_stack_overflow(addr);
4268       }
4269     } // Else, some other thread got there first
4270     do_yield_check();
4271   }
4272 }
4273 
4274 void Par_ConcMarkingClosure::do_oop(oop* p)       { Par_ConcMarkingClosure::do_oop_work(p); }
4275 void Par_ConcMarkingClosure::do_oop(narrowOop* p) { Par_ConcMarkingClosure::do_oop_work(p); }
4276 
4277 void Par_ConcMarkingClosure::trim_queue(size_t max) {
4278   while (_work_queue->size() > max) {
4279     oop new_oop;
4280     if (_work_queue->pop_local(new_oop)) {
4281       assert(new_oop->is_oop(), "Should be an oop");
4282       assert(_bit_map->isMarked((HeapWord*)new_oop), "Grey object");
4283       assert(_span.contains((HeapWord*)new_oop), "Not in span");
4284       new_oop->oop_iterate(this);  // do_oop() above
4285       do_yield_check();
4286     }
4287   }
4288 }
4289 
4290 // Upon stack overflow, we discard (part of) the stack,
4291 // remembering the least address amongst those discarded
4292 // in CMSCollector's _restart_address.
4293 void Par_ConcMarkingClosure::handle_stack_overflow(HeapWord* lost) {
4294   // We need to do this under a mutex to prevent other
4295   // workers from interfering with the work done below.
4296   MutexLockerEx ml(_overflow_stack->par_lock(),
4297                    Mutex::_no_safepoint_check_flag);
4298   // Remember the least grey address discarded
4299   HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost);
4300   _collector->lower_restart_addr(ra);
4301   _overflow_stack->reset();  // discard stack contents
4302   _overflow_stack->expand(); // expand the stack if possible
4303 }
4304 
4305 
4306 void CMSConcMarkingTask::do_work_steal(int i) {
4307   OopTaskQueue* work_q = work_queue(i);
4308   oop obj_to_scan;
4309   CMSBitMap* bm = &(_collector->_markBitMap);
4310   CMSMarkStack* ovflw = &(_collector->_markStack);
4311   int* seed = _collector->hash_seed(i);
4312   Par_ConcMarkingClosure cl(_collector, this, work_q, bm, ovflw);
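  // Drain order: first empty the local work queue, then refill it from
  // the global overflow stack, then try stealing from peers; offer
  // termination only when all three sources appear empty.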
4313   while (true) {
4314     cl.trim_queue(0);
4315     assert(work_q->size() == 0, "Should have been emptied above");
4316     if (get_work_from_overflow_stack(ovflw, work_q)) {
4317       // Can't assert below because the work obtained from the
4318       // overflow stack may already have been stolen from us.
4319       // assert(work_q->size() > 0, "Work from overflow stack");
4320       continue;
4321     } else if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
4322       assert(obj_to_scan->is_oop(), "Should be an oop");
4323       assert(bm->isMarked((HeapWord*)obj_to_scan), "Grey object");
4324       obj_to_scan->oop_iterate(&cl);
4325     } else if (terminator()->offer_termination(&_term_term)) {
4326       assert(work_q->size() == 0, "Impossible!");
4327       break;
4328     } else if (yielding() || should_yield()) {
4329       yield();
4330     }
4331   }
4332 }
4333 
4334 // This is run by the CMS (coordinator) thread.
4335 void CMSConcMarkingTask::coordinator_yield() {
4336   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
4337          "CMS thread should hold CMS token");
4338   // First give up the locks, then yield, then re-lock
4339   // We should probably use a constructor/destructor idiom to
4340   // do this unlock/lock or modify the MutexUnlocker class to
4341   // serve our purpose. XXX
4342   assert_lock_strong(_bit_map_lock);
4343   _bit_map_lock->unlock();
4344   ConcurrentMarkSweepThread::desynchronize(true);
4345   ConcurrentMarkSweepThread::acknowledge_yield_request();
4346   _collector->stopTimer();
4347   if (PrintCMSStatistics != 0) {
4348     _collector->incrementYields();
4349   }
4350   _collector->icms_wait();
4351 
4352   // It is possible for whichever thread initiated the yield request
4353   // not to get a chance to wake up and take the bitmap lock between
4354   // this thread releasing it and reacquiring it. So, while the
4355   // should_yield() flag is on, let's sleep for a bit to give the
4356   // other thread a chance to wake up. The limit imposed on the number
  // of iterations is defensive, to avoid any unforeseen circumstances
4358   // putting us into an infinite loop. Since it's always been this
4359   // (coordinator_yield()) method that was observed to cause the
4360   // problem, we are using a parameter (CMSCoordinatorYieldSleepCount)
  // which is by default non-zero. The other seven methods that
  // also perform the yield operation use a different parameter
  // (CMSYieldSleepCount), which is by default zero. This way we
  // can enable the sleeping for those methods too, if necessary.
4365   // See 6442774.
4366   //
4367   // We really need to reconsider the synchronization between the GC
4368   // thread and the yield-requesting threads in the future and we
4369   // should really use wait/notify, which is the recommended
4370   // way of doing this type of interaction. Additionally, we should
  // consolidate the eight methods that do the yield operation, which
  // are almost identical, into one for better maintainability and
  // readability. See 6445193.
4374   //
4375   // Tony 2006.06.29
4376   for (unsigned i = 0; i < CMSCoordinatorYieldSleepCount &&
4377                    ConcurrentMarkSweepThread::should_yield() &&
4378                    !CMSCollector::foregroundGCIsActive(); ++i) {
4379     os::sleep(Thread::current(), 1, false);
4380     ConcurrentMarkSweepThread::acknowledge_yield_request();
4381   }
4382 
4383   ConcurrentMarkSweepThread::synchronize(true);
4384   _bit_map_lock->lock_without_safepoint_check();
4385   _collector->startTimer();
4386 }
4387 
4388 bool CMSCollector::do_marking_mt(bool asynch) {
4389   assert(ConcGCThreads > 0 && conc_workers() != NULL, "precondition");
4390   int num_workers = AdaptiveSizePolicy::calc_active_conc_workers(
4391                                        conc_workers()->total_workers(),
4392                                        conc_workers()->active_workers(),
4393                                        Threads::number_of_non_daemon_threads());
4394   conc_workers()->set_active_workers(num_workers);
4395 
4396   CompactibleFreeListSpace* cms_space  = _cmsGen->cmsSpace();
4397 
4398   CMSConcMarkingTask tsk(this,
4399                          cms_space,
4400                          asynch,
4401                          conc_workers(),
4402                          task_queues());
4403 
  // Since the actual number of workers we get may be different
  // from the number we requested above, do we need to do anything different
  // below? In particular, maybe we need to subclass the SequentialSubTasksDone
  // class? XXX
  cms_space->initialize_sequential_subtasks_for_marking(num_workers);
4409 
4410   // Refs discovery is already non-atomic.
4411   assert(!ref_processor()->discovery_is_atomic(), "Should be non-atomic");
4412   assert(ref_processor()->discovery_is_mt(), "Discovery should be MT");
4413   conc_workers()->start_task(&tsk);
4414   while (tsk.yielded()) {
4415     tsk.coordinator_yield();
4416     conc_workers()->continue_task(&tsk);
4417   }
4418   // If the task was aborted, _restart_addr will be non-NULL
4419   assert(tsk.completed() || _restart_addr != NULL, "Inconsistency");
4420   while (_restart_addr != NULL) {
4421     // XXX For now we do not make use of ABORTED state and have not
4422     // yet implemented the right abort semantics (even in the original
4423     // single-threaded CMS case). That needs some more investigation
4424     // and is deferred for now; see CR# TBF. 07252005YSR. XXX
4425     assert(!CMSAbortSemantics || tsk.aborted(), "Inconsistency");
4426     // If _restart_addr is non-NULL, a marking stack overflow
4427     // occurred; we need to do a fresh marking iteration from the
4428     // indicated restart address.
4429     if (_foregroundGCIsActive && asynch) {
4430       // We may be running into repeated stack overflows, having
4431       // reached the limit of the stack size, while making very
4432       // slow forward progress. It may be best to bail out and
4433       // let the foreground collector do its job.
4434       // Clear _restart_addr, so that foreground GC
4435       // works from scratch. This avoids the headache of
4436       // a "rescan" which would otherwise be needed because
4437       // of the dirty mod union table & card table.
4438       _restart_addr = NULL;
4439       return false;
4440     }
4441     // Adjust the task to restart from _restart_addr
4442     tsk.reset(_restart_addr);
    cms_space->initialize_sequential_subtasks_for_marking(num_workers,
                  _restart_addr);
4445     _restart_addr = NULL;
4446     // Get the workers going again
4447     conc_workers()->start_task(&tsk);
4448     while (tsk.yielded()) {
4449       tsk.coordinator_yield();
4450       conc_workers()->continue_task(&tsk);
4451     }
4452   }
4453   assert(tsk.completed(), "Inconsistency");
4454   assert(tsk.result() == true, "Inconsistency");
4455   return true;
4456 }
4457 
4458 bool CMSCollector::do_marking_st(bool asynch) {
4459   ResourceMark rm;
4460   HandleMark   hm;
4461 
4462   // Temporarily make refs discovery single threaded (non-MT)
4463   ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false);
4464   MarkFromRootsClosure markFromRootsClosure(this, _span, &_markBitMap,
4465     &_markStack, CMSYield && asynch);
4466   // the last argument to iterate indicates whether the iteration
4467   // should be incremental with periodic yields.
4468   _markBitMap.iterate(&markFromRootsClosure);
4469   // If _restart_addr is non-NULL, a marking stack overflow
4470   // occurred; we need to do a fresh iteration from the
4471   // indicated restart address.
4472   while (_restart_addr != NULL) {
4473     if (_foregroundGCIsActive && asynch) {
4474       // We may be running into repeated stack overflows, having
4475       // reached the limit of the stack size, while making very
4476       // slow forward progress. It may be best to bail out and
4477       // let the foreground collector do its job.
4478       // Clear _restart_addr, so that foreground GC
4479       // works from scratch. This avoids the headache of
4480       // a "rescan" which would otherwise be needed because
4481       // of the dirty mod union table & card table.
4482       _restart_addr = NULL;
4483       return false;  // indicating failure to complete marking
4484     }
4485     // Deal with stack overflow:
4486     // we restart marking from _restart_addr
4487     HeapWord* ra = _restart_addr;
4488     markFromRootsClosure.reset(ra);
4489     _restart_addr = NULL;
4490     _markBitMap.iterate(&markFromRootsClosure, ra, _span.end());
4491   }
4492   return true;
4493 }
4494 
4495 void CMSCollector::preclean() {
4496   check_correct_thread_executing();
4497   assert(Thread::current()->is_ConcurrentGC_thread(), "Wrong thread");
4498   verify_work_stacks_empty();
4499   verify_overflow_empty();
4500   _abort_preclean = false;
4501   if (CMSPrecleaningEnabled) {
4502     if (!CMSEdenChunksRecordAlways) {
4503       _eden_chunk_index = 0;
4504     }
4505     size_t used = get_eden_used();
4506     size_t capacity = get_eden_capacity();
4507     // Don't start sampling unless we will get sufficiently
4508     // many samples.
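    // Specifically, sampling is enabled only if eden occupancy is still below
    // roughly capacity * CMSScheduleRemarkEdenPenetration /
    // (100 * CMSScheduleRemarkSamplingRatio), i.e. at most
    // 1/CMSScheduleRemarkSamplingRatio of the eden-penetration threshold at
    // which sample_eden() will flag the remark to be scheduled.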
4509     if (used < (capacity/(CMSScheduleRemarkSamplingRatio * 100)
4510                 * CMSScheduleRemarkEdenPenetration)) {
4511       _start_sampling = true;
4512     } else {
4513       _start_sampling = false;
4514     }
4515     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
4516     CMSPhaseAccounting pa(this, "preclean", !PrintGCDetails);
4517     preclean_work(CMSPrecleanRefLists1, CMSPrecleanSurvivors1);
4518   }
4519   CMSTokenSync x(true); // is cms thread
4520   if (CMSPrecleaningEnabled) {
4521     sample_eden();
4522     _collectorState = AbortablePreclean;
4523   } else {
4524     _collectorState = FinalMarking;
4525   }
4526   verify_work_stacks_empty();
4527   verify_overflow_empty();
4528 }
4529 
4530 // Try and schedule the remark such that young gen
4531 // occupancy is CMSScheduleRemarkEdenPenetration %.
4532 void CMSCollector::abortable_preclean() {
4533   check_correct_thread_executing();
4534   assert(CMSPrecleaningEnabled,  "Inconsistent control state");
4535   assert(_collectorState == AbortablePreclean, "Inconsistent control state");
4536 
4537   // If Eden's current occupancy is below this threshold,
4538   // immediately schedule the remark; else preclean
4539   // past the next scavenge in an effort to
  // schedule the pause as described above. By choosing
4541   // CMSScheduleRemarkEdenSizeThreshold >= max eden size
4542   // we will never do an actual abortable preclean cycle.
4543   if (get_eden_used() > CMSScheduleRemarkEdenSizeThreshold) {
4544     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
4545     CMSPhaseAccounting pa(this, "abortable-preclean", !PrintGCDetails);
4546     // We need more smarts in the abortable preclean
4547     // loop below to deal with cases where allocation
4548     // in young gen is very very slow, and our precleaning
4549     // is running a losing race against a horde of
4550     // mutators intent on flooding us with CMS updates
4551     // (dirty cards).
4552     // One, admittedly dumb, strategy is to give up
4553     // after a certain number of abortable precleaning loops
4554     // or after a certain maximum time. We want to make
4555     // this smarter in the next iteration.
4556     // XXX FIX ME!!! YSR
4557     size_t loops = 0, workdone = 0, cumworkdone = 0, waited = 0;
4558     while (!(should_abort_preclean() ||
4559              ConcurrentMarkSweepThread::should_terminate())) {
4560       workdone = preclean_work(CMSPrecleanRefLists2, CMSPrecleanSurvivors2);
4561       cumworkdone += workdone;
4562       loops++;
4563       // Voluntarily terminate abortable preclean phase if we have
4564       // been at it for too long.
4565       if ((CMSMaxAbortablePrecleanLoops != 0) &&
4566           loops >= CMSMaxAbortablePrecleanLoops) {
4567         if (PrintGCDetails) {
4568           gclog_or_tty->print(" CMS: abort preclean due to loops ");
4569         }
4570         break;
4571       }
4572       if (pa.wallclock_millis() > CMSMaxAbortablePrecleanTime) {
4573         if (PrintGCDetails) {
4574           gclog_or_tty->print(" CMS: abort preclean due to time ");
4575         }
4576         break;
4577       }
4578       // If we are doing little work each iteration, we should
4579       // take a short break.
4580       if (workdone < CMSAbortablePrecleanMinWorkPerIteration) {
4581         // Sleep for some time, waiting for work to accumulate
4582         stopTimer();
4583         cmsThread()->wait_on_cms_lock(CMSAbortablePrecleanWaitMillis);
4584         startTimer();
4585         waited++;
4586       }
4587     }
4588     if (PrintCMSStatistics > 0) {
4589       gclog_or_tty->print(" [%d iterations, %d waits, %d cards)] ",
4590                           loops, waited, cumworkdone);
4591     }
4592   }
4593   CMSTokenSync x(true); // is cms thread
4594   if (_collectorState != Idling) {
4595     assert(_collectorState == AbortablePreclean,
4596            "Spontaneous state transition?");
4597     _collectorState = FinalMarking;
4598   } // Else, a foreground collection completed this CMS cycle.
4599   return;
4600 }
4601 
4602 // Respond to an Eden sampling opportunity
4603 void CMSCollector::sample_eden() {
4604   // Make sure a young gc cannot sneak in between our
4605   // reading and recording of a sample.
4606   assert(Thread::current()->is_ConcurrentGC_thread(),
4607          "Only the cms thread may collect Eden samples");
4608   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
4609          "Should collect samples while holding CMS token");
4610   if (!_start_sampling) {
4611     return;
4612   }
4613   // When CMSEdenChunksRecordAlways is true, the eden chunk array
4614   // is populated by the young generation.
4615   if (_eden_chunk_array != NULL && !CMSEdenChunksRecordAlways) {
4616     if (_eden_chunk_index < _eden_chunk_capacity) {
4617       _eden_chunk_array[_eden_chunk_index] = *_top_addr;   // take sample
4618       assert(_eden_chunk_array[_eden_chunk_index] <= *_end_addr,
4619              "Unexpected state of Eden");
4620       // We'd like to check that what we just sampled is an oop-start address;
4621       // however, we cannot do that here since the object may not yet have been
4622       // initialized. So we'll instead do the check when we _use_ this sample
4623       // later.
4624       if (_eden_chunk_index == 0 ||
4625           (pointer_delta(_eden_chunk_array[_eden_chunk_index],
4626                          _eden_chunk_array[_eden_chunk_index-1])
4627            >= CMSSamplingGrain)) {
4628         _eden_chunk_index++;  // commit sample
4629       }
4630     }
4631   }
4632   if ((_collectorState == AbortablePreclean) && !_abort_preclean) {
4633     size_t used = get_eden_used();
4634     size_t capacity = get_eden_capacity();
4635     assert(used <= capacity, "Unexpected state of Eden");
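    // Flag abort once eden occupancy crosses CMSScheduleRemarkEdenPenetration
    // percent of capacity, so that the remark pause is scheduled at roughly
    // the desired eden penetration.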
4636     if (used >  (capacity/100 * CMSScheduleRemarkEdenPenetration)) {
4637       _abort_preclean = true;
4638     }
4639   }
4640 }
4641 
4642 
4643 size_t CMSCollector::preclean_work(bool clean_refs, bool clean_survivor) {
4644   assert(_collectorState == Precleaning ||
4645          _collectorState == AbortablePreclean, "incorrect state");
4646   ResourceMark rm;
4647   HandleMark   hm;
4648 
4649   // Precleaning is currently not MT but the reference processor
4650   // may be set for MT.  Disable it temporarily here.
4651   ReferenceProcessor* rp = ref_processor();
4652   ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(rp, false);
4653 
4654   // Do one pass of scrubbing the discovered reference lists
4655   // to remove any reference objects with strongly-reachable
4656   // referents.
4657   if (clean_refs) {
4658     CMSPrecleanRefsYieldClosure yield_cl(this);
4659     assert(rp->span().equals(_span), "Spans should be equal");
4660     CMSKeepAliveClosure keep_alive(this, _span, &_markBitMap,
4661                                    &_markStack, true /* preclean */);
4662     CMSDrainMarkingStackClosure complete_trace(this,
4663                                    _span, &_markBitMap, &_markStack,
4664                                    &keep_alive, true /* preclean */);
4665 
4666     // We don't want this step to interfere with a young
4667     // collection because we don't want to take CPU
4668     // or memory bandwidth away from the young GC threads
4669     // (which may be as many as there are CPUs).
4670     // Note that we don't need to protect ourselves from
4671     // interference with mutators because they can't
4672     // manipulate the discovered reference lists nor affect
4673     // the computed reachability of the referents, the
4674     // only properties manipulated by the precleaning
4675     // of these reference lists.
4676     stopTimer();
4677     CMSTokenSyncWithLocks x(true /* is cms thread */,
4678                             bitMapLock());
4679     startTimer();
4680     sample_eden();
4681 
4682     // The following will yield to allow foreground
4683     // collection to proceed promptly. XXX YSR:
4684     // The code in this method may need further
4685     // tweaking for better performance and some restructuring
4686     // for cleaner interfaces.
4687     GCTimer *gc_timer = NULL; // Currently not tracing concurrent phases
4688     rp->preclean_discovered_references(
4689           rp->is_alive_non_header(), &keep_alive, &complete_trace, &yield_cl,
4690           gc_timer);
4691   }
4692 
4693   if (clean_survivor) {  // preclean the active survivor space(s)
4694     assert(_young_gen->kind() == Generation::DefNew ||
4695            _young_gen->kind() == Generation::ParNew ||
4696            _young_gen->kind() == Generation::ASParNew,
4697          "incorrect type for cast");
4698     DefNewGeneration* dng = (DefNewGeneration*)_young_gen;
4699     PushAndMarkClosure pam_cl(this, _span, ref_processor(),
4700                              &_markBitMap, &_modUnionTable,
4701                              &_markStack, true /* precleaning phase */);
4702     stopTimer();
4703     CMSTokenSyncWithLocks ts(true /* is cms thread */,
4704                              bitMapLock());
4705     startTimer();
4706     unsigned int before_count =
4707       GenCollectedHeap::heap()->total_collections();
4708     SurvivorSpacePrecleanClosure
4709       sss_cl(this, _span, &_markBitMap, &_markStack,
4710              &pam_cl, before_count, CMSYield);
4711     dng->from()->object_iterate_careful(&sss_cl);
4712     dng->to()->object_iterate_careful(&sss_cl);
4713   }
4714   MarkRefsIntoAndScanClosure
4715     mrias_cl(_span, ref_processor(), &_markBitMap, &_modUnionTable,
4716              &_markStack, this, CMSYield,
4717              true /* precleaning phase */);
4718   // CAUTION: The following closure has persistent state that may need to
4719   // be reset upon a decrease in the sequence of addresses it
4720   // processes.
4721   ScanMarkedObjectsAgainCarefullyClosure
4722     smoac_cl(this, _span,
4723       &_markBitMap, &_markStack, &mrias_cl, CMSYield);
4724 
4725   // Preclean dirty cards in ModUnionTable and CardTable using
4726   // appropriate convergence criterion;
4727   // repeat CMSPrecleanIter times unless we find that
4728   // we are losing.
4729   assert(CMSPrecleanIter < 10, "CMSPrecleanIter is too large");
4730   assert(CMSPrecleanNumerator < CMSPrecleanDenominator,
4731          "Bad convergence multiplier");
4732   assert(CMSPrecleanThreshold >= 100,
4733          "Unreasonably low CMSPrecleanThreshold");
4734 
4735   size_t numIter, cumNumCards, lastNumCards, curNumCards;
4736   for (numIter = 0, cumNumCards = lastNumCards = curNumCards = 0;
4737        numIter < CMSPrecleanIter;
4738        numIter++, lastNumCards = curNumCards, cumNumCards += curNumCards) {
4739     curNumCards  = preclean_mod_union_table(_cmsGen, &smoac_cl);
4740     if (Verbose && PrintGCDetails) {
4741       gclog_or_tty->print(" (modUnionTable: %d cards)", curNumCards);
4742     }
4743     // Either there are very few dirty cards, so re-mark
4744     // pause will be small anyway, or our pre-cleaning isn't
4745     // that much faster than the rate at which cards are being
4746     // dirtied, so we might as well stop and re-mark since
4747     // precleaning won't improve our re-mark time by much.
4748     if (curNumCards <= CMSPrecleanThreshold ||
4749         (numIter > 0 &&
4750          (curNumCards * CMSPrecleanDenominator >
4751          lastNumCards * CMSPrecleanNumerator))) {
4752       numIter++;
4753       cumNumCards += curNumCards;
4754       break;
4755     }
4756   }
4757 
4758   preclean_klasses(&mrias_cl, _cmsGen->freelistLock());
4759 
4760   curNumCards = preclean_card_table(_cmsGen, &smoac_cl);
4761   cumNumCards += curNumCards;
4762   if (PrintGCDetails && PrintCMSStatistics != 0) {
4763     gclog_or_tty->print_cr(" (cardTable: %d cards, re-scanned %d cards, %d iterations)",
4764                   curNumCards, cumNumCards, numIter);
4765   }
4766   return cumNumCards;   // as a measure of useful work done
4767 }
4768 
4769 // PRECLEANING NOTES:
4770 // Precleaning involves:
4771 // . reading the bits of the modUnionTable and clearing the set bits.
4772 // . For the cards corresponding to the set bits, we scan the
4773 //   objects on those cards. This means we need the free_list_lock
4774 //   so that we can safely iterate over the CMS space when scanning
4775 //   for oops.
4776 // . When we scan the objects, we'll be both reading and setting
4777 //   marks in the marking bit map, so we'll need the marking bit map.
4778 // . For protecting _collector_state transitions, we take the CGC_lock.
//   Note that any races in the reading of card table entries by the
4780 //   CMS thread on the one hand and the clearing of those entries by the
4781 //   VM thread or the setting of those entries by the mutator threads on the
4782 //   other are quite benign. However, for efficiency it makes sense to keep
//   the VM thread from racing with the CMS thread while the latter is
//   recording dirty card info in the modUnionTable. We therefore also use the
//   CGC_lock to protect the reading of the card table and the mod union
//   table by the CMS thread.
4787 // . We run concurrently with mutator updates, so scanning
4788 //   needs to be done carefully  -- we should not try to scan
4789 //   potentially uninitialized objects.
4790 //
4791 // Locking strategy: While holding the CGC_lock, we scan over and
4792 // reset a maximal dirty range of the mod union / card tables, then lock
4793 // the free_list_lock and bitmap lock to do a full marking, then
4794 // release these locks; and repeat the cycle. This allows for a
4795 // certain amount of fairness in the sharing of these locks between
4796 // the CMS collector on the one hand, and the VM thread and the
4797 // mutators on the other.
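//
// In code, each iteration of the preclean loops below therefore follows
// roughly this pattern (a sketch of the existing structure, not extra
// locking):
//   { CMSTokenSync ts(true);   // under CGC_lock: find and reset a
//                              // maximal dirty range }
//   { CMSTokenSyncWithLocks ts(true, gen->freelistLock(), bitMapLock());
//     // scan and mark carefully through the objects on the dirty cards }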
4798 
4799 // NOTE: preclean_mod_union_table() and preclean_card_table()
4800 // further below are largely identical; if you need to modify
4801 // one of these methods, please check the other method too.
4802 
4803 size_t CMSCollector::preclean_mod_union_table(
4804   ConcurrentMarkSweepGeneration* gen,
4805   ScanMarkedObjectsAgainCarefullyClosure* cl) {
4806   verify_work_stacks_empty();
4807   verify_overflow_empty();
4808 
4809   // strategy: starting with the first card, accumulate contiguous
4810   // ranges of dirty cards; clear these cards, then scan the region
4811   // covered by these cards.
4812 
4813   // Since all of the MUT is committed ahead, we can just use
4814   // that, in case the generations expand while we are precleaning.
4815   // It might also be fine to just use the committed part of the
4816   // generation, but we might potentially miss cards when the
4817   // generation is rapidly expanding while we are in the midst
4818   // of precleaning.
4819   HeapWord* startAddr = gen->reserved().start();
4820   HeapWord* endAddr   = gen->reserved().end();
4821 
4822   cl->setFreelistLock(gen->freelistLock());   // needed for yielding
4823 
4824   size_t numDirtyCards, cumNumDirtyCards;
4825   HeapWord *nextAddr, *lastAddr;
4826   for (cumNumDirtyCards = numDirtyCards = 0,
4827        nextAddr = lastAddr = startAddr;
4828        nextAddr < endAddr;
4829        nextAddr = lastAddr, cumNumDirtyCards += numDirtyCards) {
4830 
4831     ResourceMark rm;
4832     HandleMark   hm;
4833 
4834     MemRegion dirtyRegion;
4835     {
4836       stopTimer();
4837       // Potential yield point
4838       CMSTokenSync ts(true);
4839       startTimer();
4840       sample_eden();
      // Get dirty region starting at nextAddr (inclusive),
4842       // simultaneously clearing it.
4843       dirtyRegion =
4844         _modUnionTable.getAndClearMarkedRegion(nextAddr, endAddr);
4845       assert(dirtyRegion.start() >= nextAddr,
4846              "returned region inconsistent?");
4847     }
4848     // Remember where the next search should begin.
    // The returned region (if non-empty) is a right open interval,
    // so lastAddr is obtained from the right end of that
    // interval.
4852     lastAddr = dirtyRegion.end();
4853     // Should do something more transparent and less hacky XXX
4854     numDirtyCards =
4855       _modUnionTable.heapWordDiffToOffsetDiff(dirtyRegion.word_size());
4856 
4857     // We'll scan the cards in the dirty region (with periodic
4858     // yields for foreground GC as needed).
4859     if (!dirtyRegion.is_empty()) {
4860       assert(numDirtyCards > 0, "consistency check");
4861       HeapWord* stop_point = NULL;
4862       stopTimer();
4863       // Potential yield point
4864       CMSTokenSyncWithLocks ts(true, gen->freelistLock(),
4865                                bitMapLock());
4866       startTimer();
4867       {
4868         verify_work_stacks_empty();
4869         verify_overflow_empty();
4870         sample_eden();
4871         stop_point =
4872           gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
4873       }
4874       if (stop_point != NULL) {
4875         // The careful iteration stopped early either because it found an
4876         // uninitialized object, or because we were in the midst of an
4877         // "abortable preclean", which should now be aborted. Redirty
4878         // the bits corresponding to the partially-scanned or unscanned
4879         // cards. We'll either restart at the next block boundary or
4880         // abort the preclean.
4881         assert((_collectorState == AbortablePreclean && should_abort_preclean()),
4882                "Should only be AbortablePreclean.");
4883         _modUnionTable.mark_range(MemRegion(stop_point, dirtyRegion.end()));
4884         if (should_abort_preclean()) {
4885           break; // out of preclean loop
4886         } else {
4887           // Compute the next address at which preclean should pick up;
4888           // might need bitMapLock in order to read P-bits.
4889           lastAddr = next_card_start_after_block(stop_point);
4890         }
4891       }
4892     } else {
4893       assert(lastAddr == endAddr, "consistency check");
4894       assert(numDirtyCards == 0, "consistency check");
4895       break;
4896     }
4897   }
4898   verify_work_stacks_empty();
4899   verify_overflow_empty();
4900   return cumNumDirtyCards;
4901 }
4902 
4903 // NOTE: preclean_mod_union_table() above and preclean_card_table()
4904 // below are largely identical; if you need to modify
4905 // one of these methods, please check the other method too.
4906 
4907 size_t CMSCollector::preclean_card_table(ConcurrentMarkSweepGeneration* gen,
4908   ScanMarkedObjectsAgainCarefullyClosure* cl) {
  // strategy: it is similar to preclean_mod_union_table() above, in that
4910   // we accumulate contiguous ranges of dirty cards, mark these cards
4911   // precleaned, then scan the region covered by these cards.
4912   HeapWord* endAddr   = (HeapWord*)(gen->_virtual_space.high());
4913   HeapWord* startAddr = (HeapWord*)(gen->_virtual_space.low());
4914 
4915   cl->setFreelistLock(gen->freelistLock());   // needed for yielding
4916 
4917   size_t numDirtyCards, cumNumDirtyCards;
4918   HeapWord *lastAddr, *nextAddr;
4919 
4920   for (cumNumDirtyCards = numDirtyCards = 0,
4921        nextAddr = lastAddr = startAddr;
4922        nextAddr < endAddr;
4923        nextAddr = lastAddr, cumNumDirtyCards += numDirtyCards) {
4924 
4925     ResourceMark rm;
4926     HandleMark   hm;
4927 
4928     MemRegion dirtyRegion;
4929     {
4930       // See comments in "Precleaning notes" above on why we
4931       // do this locking. XXX Could the locking overheads be
4932       // too high when dirty cards are sparse? [I don't think so.]
4933       stopTimer();
4934       CMSTokenSync x(true); // is cms thread
4935       startTimer();
4936       sample_eden();
4937       // Get and clear dirty region from card table
4938       dirtyRegion = _ct->ct_bs()->dirty_card_range_after_reset(
4939                                     MemRegion(nextAddr, endAddr),
4940                                     true,
4941                                     CardTableModRefBS::precleaned_card_val());
4942 
4943       assert(dirtyRegion.start() >= nextAddr,
4944              "returned region inconsistent?");
4945     }
4946     lastAddr = dirtyRegion.end();
4947     numDirtyCards =
4948       dirtyRegion.word_size()/CardTableModRefBS::card_size_in_words;
4949 
4950     if (!dirtyRegion.is_empty()) {
4951       stopTimer();
4952       CMSTokenSyncWithLocks ts(true, gen->freelistLock(), bitMapLock());
4953       startTimer();
4954       sample_eden();
4955       verify_work_stacks_empty();
4956       verify_overflow_empty();
4957       HeapWord* stop_point =
4958         gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
4959       if (stop_point != NULL) {
4960         assert((_collectorState == AbortablePreclean && should_abort_preclean()),
4961                "Should only be AbortablePreclean.");
4962         _ct->ct_bs()->invalidate(MemRegion(stop_point, dirtyRegion.end()));
4963         if (should_abort_preclean()) {
4964           break; // out of preclean loop
4965         } else {
4966           // Compute the next address at which preclean should pick up.
4967           lastAddr = next_card_start_after_block(stop_point);
4968         }
4969       }
4970     } else {
4971       break;
4972     }
4973   }
4974   verify_work_stacks_empty();
4975   verify_overflow_empty();
4976   return cumNumDirtyCards;
4977 }
4978 
4979 class PrecleanKlassClosure : public KlassClosure {
4980   CMKlassClosure _cm_klass_closure;
4981  public:
4982   PrecleanKlassClosure(OopClosure* oop_closure) : _cm_klass_closure(oop_closure) {}
4983   void do_klass(Klass* k) {
4984     if (k->has_accumulated_modified_oops()) {
4985       k->clear_accumulated_modified_oops();
4986 
4987       _cm_klass_closure.do_klass(k);
4988     }
4989   }
4990 };
4991 
// The freelist lock is needed to prevent asserts; is it really needed?
4993 void CMSCollector::preclean_klasses(MarkRefsIntoAndScanClosure* cl, Mutex* freelistLock) {
4994 
4995   cl->set_freelistLock(freelistLock);
4996 
4997   CMSTokenSyncWithLocks ts(true, freelistLock, bitMapLock());
4998 
4999   // SSS: Add equivalent to ScanMarkedObjectsAgainCarefullyClosure::do_yield_check and should_abort_preclean?
5000   // SSS: We should probably check if precleaning should be aborted, at suitable intervals?
5001   PrecleanKlassClosure preclean_klass_closure(cl);
5002   ClassLoaderDataGraph::classes_do(&preclean_klass_closure);
5003 
5004   verify_work_stacks_empty();
5005   verify_overflow_empty();
5006 }
5007 
5008 void CMSCollector::checkpointRootsFinal(bool asynch,
5009   bool clear_all_soft_refs, bool init_mark_was_synchronous) {
5010   assert(_collectorState == FinalMarking, "incorrect state transition?");
5011   check_correct_thread_executing();
5012   // world is stopped at this checkpoint
5013   assert(SafepointSynchronize::is_at_safepoint(),
5014          "world should be stopped");
  TraceCMSMemoryManagerStats tms(_collectorState, GenCollectedHeap::heap()->gc_cause());
5016 
5017   verify_work_stacks_empty();
5018   verify_overflow_empty();
5019 
5020   SpecializationStats::clear();
5021   if (PrintGCDetails) {
5022     gclog_or_tty->print("[YG occupancy: "SIZE_FORMAT" K ("SIZE_FORMAT" K)]",
5023                         _young_gen->used() / K,
5024                         _young_gen->capacity() / K);
5025   }
5026   if (asynch) {
5027     if (CMSScavengeBeforeRemark) {
5028       GenCollectedHeap* gch = GenCollectedHeap::heap();
      // Temporarily set the flag to false; GCH->do_collection() expects it
      // to be false and will set it to true.
5031       FlagSetting fl(gch->_is_gc_active, false);
5032       NOT_PRODUCT(GCTraceTime t("Scavenge-Before-Remark",
5033         PrintGCDetails && Verbose, true, _gc_timer_cm);)
5034       int level = _cmsGen->level() - 1;
5035       if (level >= 0) {
5036         gch->do_collection(true,        // full (i.e. force, see below)
5037                            false,       // !clear_all_soft_refs
5038                            0,           // size
5039                            false,       // is_tlab
5040                            level        // max_level
5041                           );
5042       }
5043     }
5044     FreelistLocker x(this);
5045     MutexLockerEx y(bitMapLock(),
5046                     Mutex::_no_safepoint_check_flag);
5047     assert(!init_mark_was_synchronous, "but that's impossible!");
5048     checkpointRootsFinalWork(asynch, clear_all_soft_refs, false);
5049   } else {
5050     // already have all the locks
5051     checkpointRootsFinalWork(asynch, clear_all_soft_refs,
5052                              init_mark_was_synchronous);
5053   }
5054   verify_work_stacks_empty();
5055   verify_overflow_empty();
5056   SpecializationStats::print();
5057 }
5058 
5059 void CMSCollector::checkpointRootsFinalWork(bool asynch,
5060   bool clear_all_soft_refs, bool init_mark_was_synchronous) {
5061 
5062   NOT_PRODUCT(GCTraceTime tr("checkpointRootsFinalWork", PrintGCDetails, false, _gc_timer_cm);)
5063 
5064   assert(haveFreelistLocks(), "must have free list locks");
5065   assert_lock_strong(bitMapLock());
5066 
5067   if (UseAdaptiveSizePolicy) {
5068     size_policy()->checkpoint_roots_final_begin();
5069   }
5070 
5071   ResourceMark rm;
5072   HandleMark   hm;
5073 
5074   GenCollectedHeap* gch = GenCollectedHeap::heap();
5075 
5076   if (should_unload_classes()) {
5077     CodeCache::gc_prologue();
5078   }
5079   assert(haveFreelistLocks(), "must have free list locks");
5080   assert_lock_strong(bitMapLock());
5081 
5082   if (!init_mark_was_synchronous) {
5083     // We might assume that we need not fill TLAB's when
5084     // CMSScavengeBeforeRemark is set, because we may have just done
5085     // a scavenge which would have filled all TLAB's -- and besides
5086     // Eden would be empty. This however may not always be the case --
5087     // for instance although we asked for a scavenge, it may not have
5088     // happened because of a JNI critical section. We probably need
5089     // a policy for deciding whether we can in that case wait until
5090     // the critical section releases and then do the remark following
5091     // the scavenge, and skip it here. In the absence of that policy,
5092     // or of an indication of whether the scavenge did indeed occur,
5093     // we cannot rely on TLAB's having been filled and must do
5094     // so here just in case a scavenge did not happen.
5095     gch->ensure_parsability(false);  // fill TLAB's, but no need to retire them
5096     // Update the saved marks which may affect the root scans.
5097     gch->save_marks();
5098 
5099     if (CMSPrintEdenSurvivorChunks) {
5100       print_eden_and_survivor_chunk_arrays();
5101     }
5102 
5103     {
5104       COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
5105 
5106       // Note on the role of the mod union table:
5107       // Since the marker in "markFromRoots" marks concurrently with
5108       // mutators, it is possible for some reachable objects not to have been
5109       // scanned. For instance, an only reference to an object A was
5110       // placed in object B after the marker scanned B. Unless B is rescanned,
5111       // A would be collected. Such updates to references in marked objects
5112       // are detected via the mod union table which is the set of all cards
5113       // dirtied since the first checkpoint in this GC cycle and prior to
5114       // the most recent young generation GC, minus those cleaned up by the
5115       // concurrent precleaning.
5116       if (CMSParallelRemarkEnabled && CollectedHeap::use_parallel_gc_threads()) {
5117         GCTraceTime t("Rescan (parallel) ", PrintGCDetails, false, _gc_timer_cm);
5118         do_remark_parallel();
5119       } else {
5120         GCTraceTime t("Rescan (non-parallel) ", PrintGCDetails, false,
5121                     _gc_timer_cm);
5122         do_remark_non_parallel();
5123       }
5124     }
5125   } else {
5126     assert(!asynch, "Can't have init_mark_was_synchronous in asynch mode");
5127     // The initial mark was stop-world, so there's no rescanning to
5128     // do; go straight on to the next step below.
5129   }
5130   verify_work_stacks_empty();
5131   verify_overflow_empty();
5132 
5133   {
5134     NOT_PRODUCT(GCTraceTime ts("refProcessingWork", PrintGCDetails, false, _gc_timer_cm);)
5135     refProcessingWork(asynch, clear_all_soft_refs);
5136   }
5137   verify_work_stacks_empty();
5138   verify_overflow_empty();
5139 
5140   if (should_unload_classes()) {
5141     CodeCache::gc_epilogue();
5142   }
5143   JvmtiExport::gc_epilogue();
5144 
5145   // If we encountered any (marking stack / work queue) overflow
5146   // events during the current CMS cycle, take appropriate
5147   // remedial measures, where possible, so as to try and avoid
5148   // recurrence of that condition.
5149   assert(_markStack.isEmpty(), "No grey objects");
5150   size_t ser_ovflw = _ser_pmc_remark_ovflw + _ser_pmc_preclean_ovflw +
5151                      _ser_kac_ovflw        + _ser_kac_preclean_ovflw;
5152   if (ser_ovflw > 0) {
5153     if (PrintCMSStatistics != 0) {
5154       gclog_or_tty->print_cr("Marking stack overflow (benign) "
5155         "(pmc_pc="SIZE_FORMAT", pmc_rm="SIZE_FORMAT", kac="SIZE_FORMAT
5156         ", kac_preclean="SIZE_FORMAT")",
5157         _ser_pmc_preclean_ovflw, _ser_pmc_remark_ovflw,
5158         _ser_kac_ovflw, _ser_kac_preclean_ovflw);
5159     }
5160     _markStack.expand();
5161     _ser_pmc_remark_ovflw = 0;
5162     _ser_pmc_preclean_ovflw = 0;
5163     _ser_kac_preclean_ovflw = 0;
5164     _ser_kac_ovflw = 0;
5165   }
5166   if (_par_pmc_remark_ovflw > 0 || _par_kac_ovflw > 0) {
5167     if (PrintCMSStatistics != 0) {
5168       gclog_or_tty->print_cr("Work queue overflow (benign) "
5169         "(pmc_rm="SIZE_FORMAT", kac="SIZE_FORMAT")",
5170         _par_pmc_remark_ovflw, _par_kac_ovflw);
5171     }
5172     _par_pmc_remark_ovflw = 0;
5173     _par_kac_ovflw = 0;
5174   }
5175   if (PrintCMSStatistics != 0) {
5176      if (_markStack._hit_limit > 0) {
5177        gclog_or_tty->print_cr(" (benign) Hit max stack size limit ("SIZE_FORMAT")",
5178                               _markStack._hit_limit);
5179      }
5180      if (_markStack._failed_double > 0) {
5181        gclog_or_tty->print_cr(" (benign) Failed stack doubling ("SIZE_FORMAT"),"
5182                               " current capacity "SIZE_FORMAT,
5183                               _markStack._failed_double,
5184                               _markStack.capacity());
5185      }
5186   }
5187   _markStack._hit_limit = 0;
5188   _markStack._failed_double = 0;
5189 
5190   if ((VerifyAfterGC || VerifyDuringGC) &&
5191       GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
5192     verify_after_remark();
5193   }
5194 
5195   _gc_tracer_cm->report_object_count_after_gc(&_is_alive_closure);
5196 
5197   // Change under the freelistLocks.
5198   _collectorState = Sweeping;
5199   // Call isAllClear() under bitMapLock
5200   assert(_modUnionTable.isAllClear(),
5201       "Should be clear by end of the final marking");
5202   assert(_ct->klass_rem_set()->mod_union_is_clear(),
5203       "Should be clear by end of the final marking");
5204   if (UseAdaptiveSizePolicy) {
5205     size_policy()->checkpoint_roots_final_end(gch->gc_cause());
5206   }
5207 }
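     // Recap of the final checkpoint above (informal): fill TLABs and save
     // marks, rescan dirty cards and roots (in parallel if enabled), process
     // weak references, unload classes if requested, expand the mark stack
     // after any benign overflow, and only then advance _collectorState to
     // Sweeping while still holding the freelist locks and bitMapLock.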
5208 
5209 void CMSParInitialMarkTask::work(uint worker_id) {
5210   elapsedTimer _timer;
5211   ResourceMark rm;
5212   HandleMark   hm;
5213 
5214   // ---------- scan from roots --------------
5215   _timer.start();
5216   GenCollectedHeap* gch = GenCollectedHeap::heap();
5217   Par_MarkRefsIntoClosure par_mri_cl(_collector->_span, &(_collector->_markBitMap));
5218   CMKlassClosure klass_closure(&par_mri_cl);
5219 
5220   // ---------- young gen roots --------------
5221   {
5222     work_on_young_gen_roots(worker_id, &par_mri_cl);
5223     _timer.stop();
5224     if (PrintCMSStatistics != 0) {
5225       gclog_or_tty->print_cr(
5226         "Finished young gen initial mark scan work in %dth thread: %3.3f sec",
5227         worker_id, _timer.seconds());
5228     }
5229   }
5230 
5231   // ---------- remaining roots --------------
5232   _timer.reset();
5233   _timer.start();
5234   gch->gen_process_strong_roots(_collector->_cmsGen->level(),
5235                                 false,     // yg was scanned above
5236                                 false,     // this is parallel code
5237                                 false,     // not scavenging
5238                                 SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
5239                                 &par_mri_cl,
5240                                 true,   // walk all of code cache if (so & SO_CodeCache)
5241                                 NULL,
5242                                 &klass_closure);
5243   assert(_collector->should_unload_classes()
5244          || (_collector->CMSCollector::roots_scanning_options() & SharedHeap::SO_CodeCache),
5245          "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
5246   _timer.stop();
5247   if (PrintCMSStatistics != 0) {
5248     gclog_or_tty->print_cr(
5249       "Finished remaining root initial mark scan work in %dth thread: %3.3f sec",
5250       worker_id, _timer.seconds());
5251   }
5252 }
5253 
5254 // Parallel remark task
5255 class CMSParRemarkTask: public CMSParMarkTask {
5256   CompactibleFreeListSpace* _cms_space;
5257 
5258   // The per-thread work queues, available here for stealing.
5259   OopTaskQueueSet*       _task_queues;
5260   ParallelTaskTerminator _term;
5261 
5262  public:
5263   // A value of 0 passed to n_workers will cause the number of
5264   // workers to be taken from the active workers in the work gang.
5265   CMSParRemarkTask(CMSCollector* collector,
5266                    CompactibleFreeListSpace* cms_space,
5267                    int n_workers, FlexibleWorkGang* workers,
5268                    OopTaskQueueSet* task_queues):
5269     CMSParMarkTask("Rescan roots and grey objects in parallel",
5270                    collector, n_workers),
5271     _cms_space(cms_space),
5272     _task_queues(task_queues),
5273     _term(n_workers, task_queues) { }
5274 
5275   OopTaskQueueSet* task_queues() { return _task_queues; }
5276 
5277   OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
5278 
5279   ParallelTaskTerminator* terminator() { return &_term; }
5280   int n_workers() { return _n_workers; }
5281 
5282   void work(uint worker_id);
5283 
5284  private:
5285   // ... of dirty cards in old space
5286   void do_dirty_card_rescan_tasks(CompactibleFreeListSpace* sp, int i,
5287                                   Par_MarkRefsIntoAndScanClosure* cl);
5288 
5289   // ... work stealing for the above
5290   void do_work_steal(int i, Par_MarkRefsIntoAndScanClosure* cl, int* seed);
5291 };
5292 
5293 class RemarkKlassClosure : public KlassClosure {
5294   CMKlassClosure _cm_klass_closure;
5295  public:
5296   RemarkKlassClosure(OopClosure* oop_closure) : _cm_klass_closure(oop_closure) {}
5297   void do_klass(Klass* k) {
5298     // Check if we have modified any oops in the Klass during the concurrent marking.
5299     if (k->has_accumulated_modified_oops()) {
5300       k->clear_accumulated_modified_oops();
5301 
5302       // We could have transferred the current modified marks to the accumulated marks,
5303       // like we do with the Card Table to Mod Union Table. But it's not really necessary.
5304     } else if (k->has_modified_oops()) {
5305       // Don't clear anything, this info is needed by the next young collection.
5306     } else {
5307       // No modified oops in the Klass.
5308       return;
5309     }
5310 
5311     // The klass has modified fields, need to scan the klass.
5312     _cm_klass_closure.do_klass(k);
5313   }
5314 };
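     // In summary (an informal restatement of do_klass() above):
     //   accumulated-modified set  -> clear that flag, scan the klass
     //   only current-modified set -> scan the klass, leave the flag
     //                                for the next young collection
     //   neither flag set          -> nothing to do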
5315 
5316 void CMSParMarkTask::work_on_young_gen_roots(uint worker_id, OopsInGenClosure* cl) {
5317   DefNewGeneration* dng = _collector->_young_gen->as_DefNewGeneration();
5318   EdenSpace* eden_space = dng->eden();
5319   ContiguousSpace* from_space = dng->from();
5320   ContiguousSpace* to_space   = dng->to();
5321 
5322   HeapWord** eca = _collector->_eden_chunk_array;
5323   size_t     ect = _collector->_eden_chunk_index;
5324   HeapWord** sca = _collector->_survivor_chunk_array;
5325   size_t     sct = _collector->_survivor_chunk_index;
5326 
5327   assert(ect <= _collector->_eden_chunk_capacity, "out of bounds");
5328   assert(sct <= _collector->_survivor_chunk_capacity, "out of bounds");
5329 
5330   do_young_space_rescan(worker_id, cl, to_space, NULL, 0);
5331   do_young_space_rescan(worker_id, cl, from_space, sca, sct);
5332   do_young_space_rescan(worker_id, cl, eden_space, eca, ect);
5333 }
5334 
5335 // work_queue(i) is passed to the closure
5336 // Par_MarkRefsIntoAndScanClosure.  The "i" parameter
5337 // is also passed to do_dirty_card_rescan_tasks() and to
5338 // do_work_steal() to select the i-th task_queue.
5339 
5340 void CMSParRemarkTask::work(uint worker_id) {
5341   elapsedTimer _timer;
5342   ResourceMark rm;
5343   HandleMark   hm;
5344 
5345   // ---------- rescan from roots --------------
5346   _timer.start();
5347   GenCollectedHeap* gch = GenCollectedHeap::heap();
5348   Par_MarkRefsIntoAndScanClosure par_mrias_cl(_collector,
5349     _collector->_span, _collector->ref_processor(),
5350     &(_collector->_markBitMap),
5351     work_queue(worker_id));
5352 
5353   // Rescan young gen roots first since these are likely
5354   // coarsely partitioned and may, on that account, constitute
5355   // the critical path; thus, it's best to start off that
5356   // work first.
5357   // ---------- young gen roots --------------
5358   {
5359     work_on_young_gen_roots(worker_id, &par_mrias_cl);
5360     _timer.stop();
5361     if (PrintCMSStatistics != 0) {
5362       gclog_or_tty->print_cr(
5363         "Finished young gen rescan work in %dth thread: %3.3f sec",
5364         worker_id, _timer.seconds());
5365     }
5366   }
5367 
5368   // ---------- remaining roots --------------
5369   _timer.reset();
5370   _timer.start();
5371   gch->gen_process_strong_roots(_collector->_cmsGen->level(),
5372                                 false,     // yg was scanned above
5373                                 false,     // this is parallel code
5374                                 false,     // not scavenging
5375                                 SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
5376                                 &par_mrias_cl,
5377                                 true,   // walk all of code cache if (so & SO_CodeCache)
5378                                 NULL,
5379                                 NULL);     // The dirty klasses will be handled below
5380   assert(_collector->should_unload_classes()
5381          || (_collector->CMSCollector::roots_scanning_options() & SharedHeap::SO_CodeCache),
5382          "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
5383   _timer.stop();
5384   if (PrintCMSStatistics != 0) {
5385     gclog_or_tty->print_cr(
5386       "Finished remaining root rescan work in %dth thread: %3.3f sec",
5387       worker_id, _timer.seconds());
5388   }
5389 
5390   // ---------- unhandled CLD scanning ----------
5391   if (worker_id == 0) { // Single threaded at the moment.
5392     _timer.reset();
5393     _timer.start();
5394 
5395     // Scan all new class loader data objects and new dependencies that were
5396     // introduced during concurrent marking.
5397     ResourceMark rm;
5398     GrowableArray<ClassLoaderData*>* array = ClassLoaderDataGraph::new_clds();
5399     for (int i = 0; i < array->length(); i++) {
5400       par_mrias_cl.do_class_loader_data(array->at(i));
5401     }
5402 
5403     // We don't need to keep track of new CLDs anymore.
5404     ClassLoaderDataGraph::remember_new_clds(false);
5405 
5406     _timer.stop();
5407     if (PrintCMSStatistics != 0) {
5408       gclog_or_tty->print_cr(
5409           "Finished unhandled CLD scanning work in %dth thread: %3.3f sec",
5410           worker_id, _timer.seconds());
5411     }
5412   }
5413 
5414   // ---------- dirty klass scanning ----------
5415   if (worker_id == 0) { // Single threaded at the moment.
5416     _timer.reset();
5417     _timer.start();
5418 
5419     // Scan all classes that were dirtied during the concurrent marking phase.
5420     RemarkKlassClosure remark_klass_closure(&par_mrias_cl);
5421     ClassLoaderDataGraph::classes_do(&remark_klass_closure);
5422 
5423     _timer.stop();
5424     if (PrintCMSStatistics != 0) {
5425       gclog_or_tty->print_cr(
5426           "Finished dirty klass scanning work in %dth thread: %3.3f sec",
5427           worker_id, _timer.seconds());
5428     }
5429   }
5430 
5431   // We might have added oops to ClassLoaderData::_handles during the
5432   // concurrent marking phase. These oops point to newly allocated objects
5433   // that are guaranteed to be kept alive. Either by the direct allocation
5434   // code, or when the young collector processes the strong roots. Hence,
5435   // we don't have to revisit the _handles block during the remark phase.
5436 
5437   // ---------- rescan dirty cards ------------
5438   _timer.reset();
5439   _timer.start();
5440 
5441   // Do the rescan tasks for each of the two spaces
5442   // (cms_space) in turn.
5443   // "worker_id" is passed to select the task_queue for "worker_id"
5444   do_dirty_card_rescan_tasks(_cms_space, worker_id, &par_mrias_cl);
5445   _timer.stop();
5446   if (PrintCMSStatistics != 0) {
5447     gclog_or_tty->print_cr(
5448       "Finished dirty card rescan work in %dth thread: %3.3f sec",
5449       worker_id, _timer.seconds());
5450   }
5451 
5452   // ---------- steal work from other threads ...
5453   // ---------- ... and drain overflow list.
5454   _timer.reset();
5455   _timer.start();
5456   do_work_steal(worker_id, &par_mrias_cl, _collector->hash_seed(worker_id));
5457   _timer.stop();
5458   if (PrintCMSStatistics != 0) {
5459     gclog_or_tty->print_cr(
5460       "Finished work stealing in %dth thread: %3.3f sec",
5461       worker_id, _timer.seconds());
5462   }
5463 }
5464 
5465 // Note that the worker_id parameter is not used.
5466 void
5467 CMSParMarkTask::do_young_space_rescan(uint worker_id,
5468   OopsInGenClosure* cl, ContiguousSpace* space,
5469   HeapWord** chunk_array, size_t chunk_top) {
5470   // Until all tasks completed:
5471   // . claim an unclaimed task
5472   // . compute region boundaries corresponding to task claimed
5473   //   using chunk_array
5474   // . par_oop_iterate(cl) over that region
5475 
5476   ResourceMark rm;
5477   HandleMark   hm;
5478 
5479   SequentialSubTasksDone* pst = space->par_seq_tasks();
5480   assert(pst->valid(), "Uninitialized use?");
5481 
5482   uint nth_task = 0;
5483   uint n_tasks  = pst->n_tasks();
5484 
5485   HeapWord *start, *end;
5486   while (!pst->is_task_claimed(/* reference */ nth_task)) {
5487     // We claimed task # nth_task; compute its boundaries.
5488     if (chunk_top == 0) {  // no samples were taken
5489       assert(nth_task == 0 && n_tasks == 1, "Can have only 1 EdenSpace task");
5490       start = space->bottom();
5491       end   = space->top();
5492     } else if (nth_task == 0) {
5493       start = space->bottom();
5494       end   = chunk_array[nth_task];
5495     } else if (nth_task < (uint)chunk_top) {
5496       assert(nth_task >= 1, "Control point invariant");
5497       start = chunk_array[nth_task - 1];
5498       end   = chunk_array[nth_task];
5499     } else {
5500       assert(nth_task == (uint)chunk_top, "Control point invariant");
5501       start = chunk_array[chunk_top - 1];
5502       end   = space->top();
5503     }
5504     MemRegion mr(start, end);
5505     // Verify that mr is in space
5506     assert(mr.is_empty() || space->used_region().contains(mr),
5507            "Should be in space");
5508     // Verify that "start" is an object boundary
5509     assert(mr.is_empty() || oop(mr.start())->is_oop(),
5510            "Should be an oop");
5511     space->par_oop_iterate(mr, cl);
5512   }
5513   pst->all_tasks_completed();
5514 }
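     // Worked example of the partitioning above (informal, hypothetical
     // boundaries): with chunk_array = { c0, c1 } and chunk_top == 2 the
     // claimed tasks are
     //   task 0: [bottom, c0)   task 1: [c0, c1)   task 2: [c1, top)
     // so chunk_top recorded boundaries always yield chunk_top + 1 tasks,
     // matching the n_tasks values set in
     // initialize_sequential_subtasks_for_young_gen_rescan().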
5515 
5516 void
5517 CMSParRemarkTask::do_dirty_card_rescan_tasks(
5518   CompactibleFreeListSpace* sp, int i,
5519   Par_MarkRefsIntoAndScanClosure* cl) {
5520   // Until all tasks completed:
5521   // . claim an unclaimed task
5522   // . compute region boundaries corresponding to task claimed
5523   // . transfer dirty bits ct->mut for that region
5524   // . apply rescanclosure to dirty mut bits for that region
5525 
5526   ResourceMark rm;
5527   HandleMark   hm;
5528 
5529   OopTaskQueue* work_q = work_queue(i);
5530   ModUnionClosure modUnionClosure(&(_collector->_modUnionTable));
5531   // CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION!
5532   // CAUTION: This closure has state that persists across calls to
5533   // the work method dirty_range_iterate_clear() in that it has
5534   // embedded in it a (subtype of) UpwardsObjectClosure. The
5535   // use of that state in the embedded UpwardsObjectClosure instance
5536   // assumes that the cards are always iterated (even if in parallel
5537   // by several threads) in monotonically increasing order per each
5538   // thread. This is true of the implementation below which picks
5539   // card ranges (chunks) in monotonically increasing order globally
5540   // and, a fortiori, in monotonically increasing order per thread
5541   // (the latter order being a subsequence of the former).
5542   // If the work code below is ever reorganized into a more chaotic
5543   // work-partitioning form than the current "sequential tasks"
5544   // paradigm, the use of that persistent state will have to be
5545   // revisited and modified appropriately. See also related
5546   // bug 4756801 work on which should examine this code to make
5547   // sure that the changes there do not run counter to the
5548   // assumptions made here and necessary for correctness and
5549   // efficiency. Note also that this code might yield inefficient
5550   // behaviour in the case of very large objects that span one or
5551   // more work chunks. Such objects would potentially be scanned
5552   // several times redundantly. Work on 4756801 should try and
5553   // address that performance anomaly if at all possible. XXX
5554   MemRegion  full_span  = _collector->_span;
5555   CMSBitMap* bm    = &(_collector->_markBitMap);     // shared
5556   MarkFromDirtyCardsClosure
5557     greyRescanClosure(_collector, full_span, // entire span of interest
5558                       sp, bm, work_q, cl);
5559 
5560   SequentialSubTasksDone* pst = sp->conc_par_seq_tasks();
5561   assert(pst->valid(), "Uninitialized use?");
5562   uint nth_task = 0;
5563   const int alignment = CardTableModRefBS::card_size * BitsPerWord;
5564   MemRegion span = sp->used_region();
5565   HeapWord* start_addr = span.start();
5566   HeapWord* end_addr = (HeapWord*)round_to((intptr_t)span.end(),
5567                                            alignment);
5568   const size_t chunk_size = sp->rescan_task_size(); // in HeapWord units
5569   assert((HeapWord*)round_to((intptr_t)start_addr, alignment) ==
5570          start_addr, "Check alignment");
5571   assert((size_t)round_to((intptr_t)chunk_size, alignment) ==
5572          chunk_size, "Check alignment");
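       // Rough arithmetic behind the alignment choice (assuming typical
       // values of 512-byte cards and 64-bit words): one mod union table
       // word covers BitsPerWord cards, i.e. 64 * 512 bytes = 32K of heap,
       // so chunks aligned at card_size * BitsPerWord byte boundaries can
       // never share a MUT word between two workers.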
5573 
5574   while (!pst->is_task_claimed(/* reference */ nth_task)) {
5575     // Having claimed the nth_task, compute corresponding mem-region,
5576     // which is a fortiori aligned correctly (i.e. at a MUT boundary).
5577     // The alignment restriction ensures that we do not need any
5578     // synchronization with other gang-workers while setting or
5579     // clearing bits in this chunk of the MUT.
5580     MemRegion this_span = MemRegion(start_addr + nth_task*chunk_size,
5581                                     start_addr + (nth_task+1)*chunk_size);
5582     // The last chunk's end might be way beyond end of the
5583     // used region. In that case pull back appropriately.
5584     if (this_span.end() > end_addr) {
5585       this_span.set_end(end_addr);
5586       assert(!this_span.is_empty(), "Program logic (calculation of n_tasks)");
5587     }
5588     // Iterate over the dirty cards covering this chunk, marking them
5589     // precleaned, and setting the corresponding bits in the mod union
5590     // table. Since we have been careful to partition at Card and MUT-word
5591     // boundaries no synchronization is needed between parallel threads.
5592     _collector->_ct->ct_bs()->dirty_card_iterate(this_span,
5593                                                  &modUnionClosure);
5594 
5595     // Having transferred these marks into the modUnionTable,
5596     // rescan the marked objects on the dirty cards in the modUnionTable.
5597     // Even if this is at a synchronous collection, the initial marking
5598     // may have been done during an asynchronous collection so there
5599     // may be dirty bits in the mod-union table.
5600     _collector->_modUnionTable.dirty_range_iterate_clear(
5601                   this_span, &greyRescanClosure);
5602     _collector->_modUnionTable.verifyNoOneBitsInRange(
5603                                  this_span.start(),
5604                                  this_span.end());
5605   }
5606   pst->all_tasks_completed();  // declare that I am done
5607 }
5608 
5609 // . see if we can share work_queues with ParNew? XXX
5610 void
5611 CMSParRemarkTask::do_work_steal(int i, Par_MarkRefsIntoAndScanClosure* cl,
5612                                 int* seed) {
5613   OopTaskQueue* work_q = work_queue(i);
5614   NOT_PRODUCT(int num_steals = 0;)
5615   oop obj_to_scan;
5616   CMSBitMap* bm = &(_collector->_markBitMap);
5617 
5618   while (true) {
5619     // Completely finish any left over work from (an) earlier round(s)
5620     cl->trim_queue(0);
5621     size_t num_from_overflow_list = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
5622                                          (size_t)ParGCDesiredObjsFromOverflowList);
5623     // Now check if there's any work in the overflow list
5624     // Passing ParallelGCThreads as the third parameter, no_of_gc_threads,
5625     // only affects the number of attempts made to get work from the
5626     // overflow list and does not affect the number of workers.  Just
5627     // pass ParallelGCThreads so this behavior is unchanged.
5628     if (_collector->par_take_from_overflow_list(num_from_overflow_list,
5629                                                 work_q,
5630                                                 ParallelGCThreads)) {
5631       // found something in global overflow list;
5632       // not yet ready to go stealing work from others.
5633       // We'd like to assert(work_q->size() != 0, ...)
5634       // because we just took work from the overflow list,
5635       // but of course we can't since all of that could have
5636       // been already stolen from us.
5637       // "He giveth and He taketh away."
5638       continue;
5639     }
5640     // Verify that we have no work before we resort to stealing
5641     assert(work_q->size() == 0, "Have work, shouldn't steal");
5642     // Try to steal from other queues that have work
5643     if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
5644       NOT_PRODUCT(num_steals++;)
5645       assert(obj_to_scan->is_oop(), "Oops, not an oop!");
5646       assert(bm->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?");
5647       // Do scanning work
5648       obj_to_scan->oop_iterate(cl);
5649       // Loop around, finish this work, and try to steal some more
5650     } else if (terminator()->offer_termination()) {
5651         break;  // nirvana from the infinite cycle
5652     }
5653   }
5654   NOT_PRODUCT(
5655     if (PrintCMSStatistics != 0) {
5656       gclog_or_tty->print("\n\t(%d: stole %d oops)", i, num_steals);
5657     }
5658   )
5659   assert(work_q->size() == 0 && _collector->overflow_list_is_empty(),
5660          "Else our work is not yet done");
5661 }
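     // Informally, the loop above follows the usual work-stealing
     // termination protocol: a worker offers termination only when its
     // local queue and the global overflow list both appear empty, and it
     // resumes scanning if offer_termination() observes that a peer has
     // made new work available.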
5662 
5663 // If CMSEdenChunksRecordAlways is true, record object boundaries in
5664 // _eden_chunk_array by sampling the eden top in the slow-path eden
5665 // object allocation code path.
5666 // If CMSEdenChunksRecordAlways is false, we rely instead on the
5667 // asynchronous sampling in sample_eden(), which is active during
5668 // part of the preclean phase.
5669 void CMSCollector::sample_eden_chunk() {
5670   if (CMSEdenChunksRecordAlways && _eden_chunk_array != NULL) {
5671     if (_eden_chunk_lock->try_lock()) {
5672       // Record a sample. This is the critical section. The contents
5673       // of the _eden_chunk_array have to be non-decreasing in
5674       // address order.
5675       _eden_chunk_array[_eden_chunk_index] = *_top_addr;
5676       assert(_eden_chunk_array[_eden_chunk_index] <= *_end_addr,
5677              "Unexpected state of Eden");
5678       if (_eden_chunk_index == 0 ||
5679           ((_eden_chunk_array[_eden_chunk_index] > _eden_chunk_array[_eden_chunk_index-1]) &&
5680            (pointer_delta(_eden_chunk_array[_eden_chunk_index],
5681                           _eden_chunk_array[_eden_chunk_index-1]) >= CMSSamplingGrain))) {
5682         _eden_chunk_index++;  // commit sample
5683       }
5684       _eden_chunk_lock->unlock();
5685     }
5686   }
5687 }
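     // Informal example of the commit test above (hypothetical grain): with
     // CMSSamplingGrain of, say, 16K heap words, a new top sample is
     // committed only if it lies at least that far above the previously
     // recorded boundary (the first sample is always committed), bounding
     // how many rescan tasks eden can split into.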
5688 
5689 // Return a thread-local PLAB recording array, as appropriate.
5690 void* CMSCollector::get_data_recorder(int thr_num) {
5691   if (_survivor_plab_array != NULL &&
5692       (CMSPLABRecordAlways ||
5693        (_collectorState > Marking && _collectorState < FinalMarking))) {
5694     assert(thr_num < (int)ParallelGCThreads, "thr_num is out of bounds");
5695     ChunkArray* ca = &_survivor_plab_array[thr_num];
5696     ca->reset();   // clear it so that fresh data is recorded
5697     return (void*) ca;
5698   } else {
5699     return NULL;
5700   }
5701 }
5702 
5703 // Reset all the thread-local PLAB recording arrays
5704 void CMSCollector::reset_survivor_plab_arrays() {
5705   for (uint i = 0; i < ParallelGCThreads; i++) {
5706     _survivor_plab_array[i].reset();
5707   }
5708 }
5709 
5710 // Merge the per-thread plab arrays into the global survivor chunk
5711 // array which will provide the partitioning of the survivor space
5712 // for CMS initial scan and rescan.
5713 void CMSCollector::merge_survivor_plab_arrays(ContiguousSpace* surv,
5714                                               int no_of_gc_threads) {
5715   assert(_survivor_plab_array  != NULL, "Error");
5716   assert(_survivor_chunk_array != NULL, "Error");
5717   assert(_collectorState == FinalMarking ||
5718          (CMSParallelInitialMarkEnabled && _collectorState == InitialMarking), "Error");
5719   for (int j = 0; j < no_of_gc_threads; j++) {
5720     _cursor[j] = 0;
5721   }
5722   HeapWord* top = surv->top();
5723   size_t i;
5724   for (i = 0; i < _survivor_chunk_capacity; i++) {  // all sca entries
5725     HeapWord* min_val = top;          // Higher than any PLAB address
5726     uint      min_tid = 0;            // position of min_val this round
5727     for (int j = 0; j < no_of_gc_threads; j++) {
5728       ChunkArray* cur_sca = &_survivor_plab_array[j];
5729       if (_cursor[j] == cur_sca->end()) {
5730         continue;
5731       }
5732       assert(_cursor[j] < cur_sca->end(), "ctl pt invariant");
5733       HeapWord* cur_val = cur_sca->nth(_cursor[j]);
5734       assert(surv->used_region().contains(cur_val), "Out of bounds value");
5735       if (cur_val < min_val) {
5736         min_tid = j;
5737         min_val = cur_val;
5738       } else {
5739         assert(cur_val < top, "All recorded addresses should be less");
5740       }
5741     }
5742     // At this point min_val and min_tid are respectively
5743     // the least address in _survivor_plab_array[j]->nth(_cursor[j])
5744     // and the thread (j) that witnesses that address.
5745     // We record this address in the _survivor_chunk_array[i]
5746     // and increment _cursor[min_tid] prior to the next round i.
5747     if (min_val == top) {
5748       break;
5749     }
5750     _survivor_chunk_array[i] = min_val;
5751     _cursor[min_tid]++;
5752   }
5753   // We are all done; record the size of the _survivor_chunk_array
5754   _survivor_chunk_index = i; // exclusive: [0, i)
5755   if (PrintCMSStatistics > 0) {
5756     gclog_or_tty->print(" (Survivor:" SIZE_FORMAT " chunks) ", i);
5757   }
5758   // Verify that we used up all the recorded entries
5759   #ifdef ASSERT
5760     size_t total = 0;
5761     for (int j = 0; j < no_of_gc_threads; j++) {
5762       assert(_cursor[j] == _survivor_plab_array[j].end(), "Ctl pt invariant");
5763       total += _cursor[j];
5764     }
5765     assert(total == _survivor_chunk_index, "Ctl Pt Invariant");
5766     // Check that the merged array is in sorted order
5767     if (total > 0) {
5768       for (size_t i = 0; i < total - 1; i++) {
5769         if (PrintCMSStatistics > 0) {
5770           gclog_or_tty->print(" (chunk" SIZE_FORMAT ":" INTPTR_FORMAT ") ",
5771                               i, _survivor_chunk_array[i]);
5772         }
5773         assert(_survivor_chunk_array[i] < _survivor_chunk_array[i+1],
5774                "Not sorted");
5775       }
5776     }
5777   #endif // ASSERT
5778 }
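     // The loop above is in effect a k-way merge (k = no_of_gc_threads) of
     // the sorted per-thread PLAB arrays. Informal example with hypothetical
     // addresses: if thread 0 recorded { 0x1000, 0x3000 } and thread 1
     // recorded { 0x2000 }, the merged _survivor_chunk_array becomes
     // { 0x1000, 0x2000, 0x3000 } with _survivor_chunk_index == 3.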
5779 
5780 // Set up the space's par_seq_tasks structure for work claiming
5781 // for parallel initial scan and rescan of young gen.
5782 // See ParRescanTask where this is currently used.
5783 void
5784 CMSCollector::
5785 initialize_sequential_subtasks_for_young_gen_rescan(int n_threads) {
5786   assert(n_threads > 0, "Unexpected n_threads argument");
5787   DefNewGeneration* dng = (DefNewGeneration*)_young_gen;
5788 
5789   // Eden space
5790   {
5791     SequentialSubTasksDone* pst = dng->eden()->par_seq_tasks();
5792     assert(!pst->valid(), "Clobbering existing data?");
5793     // The boundaries recorded in [0, _eden_chunk_index) yield _eden_chunk_index + 1 tasks.
5794     size_t n_tasks = _eden_chunk_index + 1;
5795     assert(n_tasks == 1 || _eden_chunk_array != NULL, "Error");
5796     // Sets the condition for completion of the subtask (how many threads
5797     // need to finish in order to be done).
5798     pst->set_n_threads(n_threads);
5799     pst->set_n_tasks((int)n_tasks);
5800   }
5801 
5802   // Merge the survivor plab arrays into _survivor_chunk_array
5803   if (_survivor_plab_array != NULL) {
5804     merge_survivor_plab_arrays(dng->from(), n_threads);
5805   } else {
5806     assert(_survivor_chunk_index == 0, "Error");
5807   }
5808 
5809   // To space
5810   {
5811     SequentialSubTasksDone* pst = dng->to()->par_seq_tasks();
5812     assert(!pst->valid(), "Clobbering existing data?");
5813     // Sets the condition for completion of the subtask (how many threads
5814     // need to finish in order to be done).
5815     pst->set_n_threads(n_threads);
5816     pst->set_n_tasks(1);
5817     assert(pst->valid(), "Error");
5818   }
5819 
5820   // From space
5821   {
5822     SequentialSubTasksDone* pst = dng->from()->par_seq_tasks();
5823     assert(!pst->valid(), "Clobbering existing data?");
5824     size_t n_tasks = _survivor_chunk_index + 1;
5825     assert(n_tasks == 1 || _survivor_chunk_array != NULL, "Error");
5826     // Sets the condition for completion of the subtask (how many threads
5827     // need to finish in order to be done).
5828     pst->set_n_threads(n_threads);
5829     pst->set_n_tasks((int)n_tasks);
5830     assert(pst->valid(), "Error");
5831   }
5832 }
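     // Summary of the task counts established above: eden gets
     // _eden_chunk_index + 1 tasks, from-space gets
     // _survivor_chunk_index + 1, and to-space, for which no samples are
     // recorded, always gets exactly one task.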
5833 
5834 // Parallel version of remark
5835 void CMSCollector::do_remark_parallel() {
5836   GenCollectedHeap* gch = GenCollectedHeap::heap();
5837   FlexibleWorkGang* workers = gch->workers();
5838   assert(workers != NULL, "Need parallel worker threads.");
5839   // Choose to use the number of GC workers most recently set
5840   // into "active_workers".  If active_workers is not set, set it
5841   // to ParallelGCThreads.
5842   int n_workers = workers->active_workers();
5843   if (n_workers == 0) {
5844     assert(n_workers > 0, "Should have been set during scavenge");
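         // The assert above documents the expectation for debug builds;
         // product builds recover by falling back to ParallelGCThreads.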
5845     n_workers = ParallelGCThreads;
5846     workers->set_active_workers(n_workers);
5847   }
5848   CompactibleFreeListSpace* cms_space  = _cmsGen->cmsSpace();
5849 
5850   CMSParRemarkTask tsk(this,
5851     cms_space,
5852     n_workers, workers, task_queues());
5853 
5854   // Set up for parallel process_strong_roots work.
5855   gch->set_par_threads(n_workers);
5856   // We won't be iterating over the cards in the card table updating
5857   // the younger_gen cards, so we shouldn't call the following else
5858   // the verification code as well as subsequent younger_refs_iterate
5859   // code would get confused. XXX
5860   // gch->rem_set()->prepare_for_younger_refs_iterate(true); // parallel
5861 
5862   // The young gen rescan work will not be done as part of
5863   // process_strong_roots (which currently doesn't know how to
5864   // parallelize such a scan), but rather will be broken up into
5865   // a set of parallel tasks (via the sampling that the [abortable]
5866   // preclean phase did of EdenSpace, plus the [two] tasks of
5867   // scanning the [two] survivor spaces). Further fine-grain
5868   // parallelization of the scanning of the survivor spaces
5869   // themselves, and of precleaning of the younger gen itself
5870   // is deferred to the future.
5871   initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
5872 
5873   // The dirty card rescan work is broken up into a "sequence"
5874   // of parallel tasks (per constituent space) that are dynamically
5875   // claimed by the parallel threads.
5876   cms_space->initialize_sequential_subtasks_for_rescan(n_workers);
5877 
5878   // It turns out that even when we're using 1 thread, doing the work in a
5879   // separate thread causes wide variance in run times.  We can't help this
5880   // in the multi-threaded case, but we special-case n=1 here to get
5881   // repeatable measurements of the 1-thread overhead of the parallel code.
5882   if (n_workers > 1) {
5883     // Make refs discovery MT-safe, if it isn't already: it may not
5884     // necessarily be so, since it's possible that we are doing
5885     // ST marking.
5886     ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), true);
5887     GenCollectedHeap::StrongRootsScope srs(gch);
5888     workers->run_task(&tsk);
5889   } else {
5890     ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false);
5891     GenCollectedHeap::StrongRootsScope srs(gch);
5892     tsk.work(0);
5893   }
5894 
5895   gch->set_par_threads(0);  // 0 ==> non-parallel.
5896   // restore, single-threaded for now, any preserved marks
5897   // as a result of work_q overflow
5898   restore_preserved_marks_if_any();
5899 }
5900 
5901 // Non-parallel version of remark
5902 void CMSCollector::do_remark_non_parallel() {
5903   ResourceMark rm;
5904   HandleMark   hm;
5905   GenCollectedHeap* gch = GenCollectedHeap::heap();
5906   ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false);
5907 
5908   MarkRefsIntoAndScanClosure
5909     mrias_cl(_span, ref_processor(), &_markBitMap, NULL /* not precleaning */,
5910              &_markStack, this,
5911              false /* should_yield */, false /* not precleaning */);
5912   MarkFromDirtyCardsClosure
5913     markFromDirtyCardsClosure(this, _span,
5914                               NULL,  // space is set further below
5915                               &_markBitMap, &_markStack, &mrias_cl);
5916   {
5917     GCTraceTime t("grey object rescan", PrintGCDetails, false, _gc_timer_cm);
5918     // Iterate over the dirty cards, setting the corresponding bits in the
5919     // mod union table.
5920     {
5921       ModUnionClosure modUnionClosure(&_modUnionTable);
5922       _ct->ct_bs()->dirty_card_iterate(
5923                       _cmsGen->used_region(),
5924                       &modUnionClosure);
5925     }
5926     // Having transferred these marks into the modUnionTable, we just need
5927     // to rescan the marked objects on the dirty cards in the modUnionTable.
5928     // The initial marking may have been done during an asynchronous
5929     // collection so there may be dirty bits in the mod-union table.
5930     const int alignment =
5931       CardTableModRefBS::card_size * BitsPerWord;
5932     {
5933       // ... First handle dirty cards in CMS gen
5934       markFromDirtyCardsClosure.set_space(_cmsGen->cmsSpace());
5935       MemRegion ur = _cmsGen->used_region();
5936       HeapWord* lb = ur.start();
5937       HeapWord* ub = (HeapWord*)round_to((intptr_t)ur.end(), alignment);
5938       MemRegion cms_span(lb, ub);
5939       _modUnionTable.dirty_range_iterate_clear(cms_span,
5940                                                &markFromDirtyCardsClosure);
5941       verify_work_stacks_empty();
5942       if (PrintCMSStatistics != 0) {
5943         gclog_or_tty->print(" (re-scanned "SIZE_FORMAT" dirty cards in cms gen) ",
5944           markFromDirtyCardsClosure.num_dirty_cards());
5945       }
5946     }
5947   }
5948   if (VerifyDuringGC &&
5949       GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
5950     HandleMark hm;  // Discard invalid handles created during verification
5951     Universe::verify();
5952   }
5953   {
5954     GCTraceTime t("root rescan", PrintGCDetails, false, _gc_timer_cm);
5955 
5956     verify_work_stacks_empty();
5957 
5958     gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
5959     GenCollectedHeap::StrongRootsScope srs(gch);
5960     gch->gen_process_strong_roots(_cmsGen->level(),
5961                                   true,  // younger gens as roots
5962                                   false, // use the local StrongRootsScope
5963                                   false, // not scavenging
5964                                   SharedHeap::ScanningOption(roots_scanning_options()),
5965                                   &mrias_cl,
5966                                   true,   // walk code active on stacks
5967                                   NULL,
5968                                   NULL);  // The dirty klasses will be handled below
5969 
5970     assert(should_unload_classes()
5971            || (roots_scanning_options() & SharedHeap::SO_CodeCache),
5972            "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
5973   }
5974 
5975   {
5976     GCTraceTime t("visit unhandled CLDs", PrintGCDetails, false, _gc_timer_cm);
5977 
5978     verify_work_stacks_empty();
5979 
5980     // Scan all class loader data objects that might have been introduced
5981     // during concurrent marking.
5982     ResourceMark rm;
5983     GrowableArray<ClassLoaderData*>* array = ClassLoaderDataGraph::new_clds();
5984     for (int i = 0; i < array->length(); i++) {
5985       mrias_cl.do_class_loader_data(array->at(i));
5986     }
5987 
5988     // We don't need to keep track of new CLDs anymore.
5989     ClassLoaderDataGraph::remember_new_clds(false);
5990 
5991     verify_work_stacks_empty();
5992   }
5993 
5994   {
5995     GCTraceTime t("dirty klass scan", PrintGCDetails, false, _gc_timer_cm);
5996 
5997     verify_work_stacks_empty();
5998 
5999     RemarkKlassClosure remark_klass_closure(&mrias_cl);
6000     ClassLoaderDataGraph::classes_do(&remark_klass_closure);
6001 
6002     verify_work_stacks_empty();
6003   }
6004 
6005   // We might have added oops to ClassLoaderData::_handles during the
6006   // concurrent marking phase. These oops point to newly allocated objects
6007   // that are guaranteed to be kept alive. Either by the direct allocation
6008   // code, or when the young collector processes the strong roots. Hence,
6009   // we don't have to revisit the _handles block during the remark phase.
6010 
6011   verify_work_stacks_empty();
6012   // Restore evacuated mark words, if any, used for overflow list links
6013   if (!CMSOverflowEarlyRestoration) {
6014     restore_preserved_marks_if_any();
6015   }
6016   verify_overflow_empty();
6017 }
6018 
6019 ////////////////////////////////////////////////////////
6020 // Parallel Reference Processing Task Proxy Class
6021 ////////////////////////////////////////////////////////
6022 class CMSRefProcTaskProxy: public AbstractGangTaskWOopQueues {
6023   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
6024   CMSCollector*          _collector;
6025   CMSBitMap*             _mark_bit_map;
6026   const MemRegion        _span;
6027   ProcessTask&           _task;
6028 
6029 public:
6030   CMSRefProcTaskProxy(ProcessTask&     task,
6031                       CMSCollector*    collector,
6032                       const MemRegion& span,
6033                       CMSBitMap*       mark_bit_map,
6034                       AbstractWorkGang* workers,
6035                       OopTaskQueueSet* task_queues):
6036     // XXX Should superclass AGTWOQ also know about AWG since it knows
6037     // about the task_queues used by the AWG? Then it could initialize
6038     // the terminator() object. See 6984287. The set_for_termination()
6039     // below is a temporary band-aid for the regression in 6984287.
6040     AbstractGangTaskWOopQueues("Process referents by policy in parallel",
6041       task_queues),
6042     _task(task),
6043     _collector(collector), _span(span), _mark_bit_map(mark_bit_map)
6044   {
6045     assert(_collector->_span.equals(_span) && !_span.is_empty(),
6046            "Inconsistency in _span");
6047     set_for_termination(workers->active_workers());
6048   }
6049 
6050   OopTaskQueueSet* task_queues() { return queues(); }
6051 
6052   OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
6053 
6054   void do_work_steal(int i,
6055                      CMSParDrainMarkingStackClosure* drain,
6056                      CMSParKeepAliveClosure* keep_alive,
6057                      int* seed);
6058 
6059   virtual void work(uint worker_id);
6060 };
6061 
6062 void CMSRefProcTaskProxy::work(uint worker_id) {
6063   assert(_collector->_span.equals(_span), "Inconsistency in _span");
6064   CMSParKeepAliveClosure par_keep_alive(_collector, _span,
6065                                         _mark_bit_map,
6066                                         work_queue(worker_id));
6067   CMSParDrainMarkingStackClosure par_drain_stack(_collector, _span,
6068                                                  _mark_bit_map,
6069                                                  work_queue(worker_id));
6070   CMSIsAliveClosure is_alive_closure(_span, _mark_bit_map);
6071   _task.work(worker_id, is_alive_closure, par_keep_alive, par_drain_stack);
6072   if (_task.marks_oops_alive()) {
6073     do_work_steal(worker_id, &par_drain_stack, &par_keep_alive,
6074                   _collector->hash_seed(worker_id));
6075   }
6076   assert(work_queue(worker_id)->size() == 0, "work_queue should be empty");
6077   assert(_collector->_overflow_list == NULL, "non-empty _overflow_list");
6078 }
6079 
6080 class CMSRefEnqueueTaskProxy: public AbstractGangTask {
6081   typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
6082   EnqueueTask& _task;
6083 
6084 public:
6085   CMSRefEnqueueTaskProxy(EnqueueTask& task)
6086     : AbstractGangTask("Enqueue reference objects in parallel"),
6087       _task(task)
6088   { }
6089 
6090   virtual void work(uint worker_id)
6091   {
6092     _task.work(worker_id);
6093   }
6094 };
6095 
6096 CMSParKeepAliveClosure::CMSParKeepAliveClosure(CMSCollector* collector,
6097   MemRegion span, CMSBitMap* bit_map, OopTaskQueue* work_queue):
6098    _span(span),
6099    _bit_map(bit_map),
6100    _work_queue(work_queue),
6101    _mark_and_push(collector, span, bit_map, work_queue),
6102    _low_water_mark(MIN2((uint)(work_queue->max_elems()/4),
6103                         (uint)(CMSWorkQueueDrainThreshold * ParallelGCThreads)))
6104 { }
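     // Rough arithmetic for _low_water_mark above (hypothetical numbers):
     // with a work queue capacity of, say, 16K entries, 8 parallel GC
     // threads, and a CMSWorkQueueDrainThreshold of, say, 10, the mark
     // computes to MIN2(16K/4, 10 * 8) = 80 entries.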
6105 
6106 // . see if we can share work_queues with ParNew? XXX
6107 void CMSRefProcTaskProxy::do_work_steal(int i,
6108   CMSParDrainMarkingStackClosure* drain,
6109   CMSParKeepAliveClosure* keep_alive,
6110   int* seed) {
6111   OopTaskQueue* work_q = work_queue(i);
6112   NOT_PRODUCT(int num_steals = 0;)
6113   oop obj_to_scan;
6114 
6115   while (true) {
6116     // Completely finish any left over work from (an) earlier round(s)
6117     drain->trim_queue(0);
6118     size_t num_from_overflow_list = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
6119                                          (size_t)ParGCDesiredObjsFromOverflowList);
6120     // Now check if there's any work in the overflow list
6121     // Passing ParallelGCThreads as the third parameter, no_of_gc_threads,
6122     // only affects the number of attempts made to get work from the
6123     // overflow list and does not affect the number of workers.  Just
6124     // pass ParallelGCThreads so this behavior is unchanged.
6125     if (_collector->par_take_from_overflow_list(num_from_overflow_list,
6126                                                 work_q,
6127                                                 ParallelGCThreads)) {
6128       // Found something in global overflow list;
6129       // not yet ready to go stealing work from others.
6130       // We'd like to assert(work_q->size() != 0, ...)
6131       // because we just took work from the overflow list,
6132       // but of course we can't, since all of that might have
6133       // been already stolen from us.
6134       continue;
6135     }
6136     // Verify that we have no work before we resort to stealing
6137     assert(work_q->size() == 0, "Have work, shouldn't steal");
6138     // Try to steal from other queues that have work
6139     if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
6140       NOT_PRODUCT(num_steals++;)
6141       assert(obj_to_scan->is_oop(), "Oops, not an oop!");
6142       assert(_mark_bit_map->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?");
6143       // Do scanning work
6144       obj_to_scan->oop_iterate(keep_alive);
6145       // Loop around, finish this work, and try to steal some more
6146     } else if (terminator()->offer_termination()) {
6147       break;  // nirvana from the infinite cycle
6148     }
6149   }
6150   NOT_PRODUCT(
6151     if (PrintCMSStatistics != 0) {
6152       gclog_or_tty->print("\n\t(%d: stole %d oops)", i, num_steals);
6153     }
6154   )
6155 }
6156 
6157 void CMSRefProcTaskExecutor::execute(ProcessTask& task)
6158 {
6159   GenCollectedHeap* gch = GenCollectedHeap::heap();
6160   FlexibleWorkGang* workers = gch->workers();
6161   assert(workers != NULL, "Need parallel worker threads.");
6162   CMSRefProcTaskProxy rp_task(task, &_collector,
6163                               _collector.ref_processor()->span(),
6164                               _collector.markBitMap(),
6165                               workers, _collector.task_queues());
6166   workers->run_task(&rp_task);
6167 }
6168 
6169 void CMSRefProcTaskExecutor::execute(EnqueueTask& task)
6170 {
6172   GenCollectedHeap* gch = GenCollectedHeap::heap();
6173   FlexibleWorkGang* workers = gch->workers();
6174   assert(workers != NULL, "Need parallel worker threads.");
6175   CMSRefEnqueueTaskProxy enq_task(task);
6176   workers->run_task(&enq_task);
6177 }
6178 
6179 void CMSCollector::refProcessingWork(bool asynch, bool clear_all_soft_refs) {
6180 
6181   ResourceMark rm;
6182   HandleMark   hm;
6183 
6184   ReferenceProcessor* rp = ref_processor();
6185   assert(rp->span().equals(_span), "Spans should be equal");
6186   assert(!rp->enqueuing_is_done(), "Enqueuing should not be complete");
6187   // Process weak references.
6188   rp->setup_policy(clear_all_soft_refs);
6189   verify_work_stacks_empty();
6190 
6191   CMSKeepAliveClosure cmsKeepAliveClosure(this, _span, &_markBitMap,
6192                                           &_markStack, false /* !preclean */);
6193   CMSDrainMarkingStackClosure cmsDrainMarkingStackClosure(this,
6194                                 _span, &_markBitMap, &_markStack,
6195                                 &cmsKeepAliveClosure, false /* !preclean */);
6196   {
6197     GCTraceTime t("weak refs processing", PrintGCDetails, false, _gc_timer_cm);
6198 
6199     ReferenceProcessorStats stats;
6200     if (rp->processing_is_mt()) {
6201       // Set the degree of MT here.  If the discovery is done MT, there
6202       // may have been a different number of threads doing the discovery
6203       // and a different number of discovered lists may have Ref objects.
6204       // That is OK as long as the Reference lists are balanced (see
6205       // balance_all_queues() and balance_queues()).
6206       GenCollectedHeap* gch = GenCollectedHeap::heap();
6207       int active_workers = ParallelGCThreads;
6208       FlexibleWorkGang* workers = gch->workers();
6209       if (workers != NULL) {
6210         active_workers = workers->active_workers();
6211         // The expectation is that active_workers will have already
6212         // been set to a reasonable value.  If it has not been set,
6213         // investigate.
6214         assert(active_workers > 0, "Should have been set during scavenge");
6215       }
6216       rp->set_active_mt_degree(active_workers);
6217       CMSRefProcTaskExecutor task_executor(*this);
6218       stats = rp->process_discovered_references(&_is_alive_closure,
6219                                         &cmsKeepAliveClosure,
6220                                         &cmsDrainMarkingStackClosure,
6221                                         &task_executor,
6222                                         _gc_timer_cm);
6223     } else {
6224       stats = rp->process_discovered_references(&_is_alive_closure,
6225                                         &cmsKeepAliveClosure,
6226                                         &cmsDrainMarkingStackClosure,
6227                                         NULL,
6228                                         _gc_timer_cm);
6229     }
6230     _gc_tracer_cm->report_gc_reference_stats(stats);
6231 
6232   }
6233 
6234   // This is the point where the entire marking should have completed.
6235   verify_work_stacks_empty();
6236 
6237   if (should_unload_classes()) {
6238     {
6239       GCTraceTime t("class unloading", PrintGCDetails, false, _gc_timer_cm);
6240 
6241       // Unload classes and purge the SystemDictionary.
6242       bool purged_class = SystemDictionary::do_unloading(&_is_alive_closure);
6243 
6244       // Unload nmethods.
6245       CodeCache::do_unloading(&_is_alive_closure, purged_class);
6246 
6247       // Prune dead klasses from subklass/sibling/implementor lists.
6248       Klass::clean_weak_klass_links(&_is_alive_closure);
6249     }
6250 
6251     {
6252       GCTraceTime t("scrub symbol table", PrintGCDetails, false, _gc_timer_cm);
6253       // Clean up unreferenced symbols in symbol table.
6254       SymbolTable::unlink();
6255     }
6256   }
6257 
6258   // CMS doesn't use the StringTable as hard roots when class unloading is turned off.
6259   // Need to check if we really scanned the StringTable.
6260   if ((roots_scanning_options() & SharedHeap::SO_Strings) == 0) {
6261     GCTraceTime t("scrub string table", PrintGCDetails, false, _gc_timer_cm);
6262     // Delete entries for dead interned strings.
6263     StringTable::unlink(&_is_alive_closure);
6264   }
6265 
6266   // Restore any preserved marks as a result of mark stack or
6267   // work queue overflow
6268   restore_preserved_marks_if_any();  // done single-threaded for now
6269 
6270   rp->set_enqueuing_is_done(true);
6271   if (rp->processing_is_mt()) {
6272     rp->balance_all_queues();
6273     CMSRefProcTaskExecutor task_executor(*this);
6274     rp->enqueue_discovered_references(&task_executor);
6275   } else {
6276     rp->enqueue_discovered_references(NULL);
6277   }
6278   rp->verify_no_references_recorded();
6279   assert(!rp->discovery_enabled(), "should have been disabled");
6280 }
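     // Recap of the ordering above (informal): weak references are
     // processed first (marking is truly complete only after that), then
     // classes and nmethods are unloaded if requested, the symbol table is
     // scrubbed, dead interned strings are removed when the string table
     // was not scanned as a hard root, preserved marks are restored, and
     // finally the surviving references are enqueued.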
6281 
6282 #ifndef PRODUCT
6283 void CMSCollector::check_correct_thread_executing() {
6284   Thread* t = Thread::current();
6285   // Only the VM thread or the CMS thread should be here.
6286   assert(t->is_ConcurrentGC_thread() || t->is_VM_thread(),
6287          "Unexpected thread type");
6288   // If this is the VM thread, the foreground collector
6289   // should not be waiting.  Note that _foregroundGCIsActive is
6290   // true while the foreground collector is waiting.
6291   if (_foregroundGCShouldWait) {
6292     // We cannot be the VM thread
6293     assert(t->is_ConcurrentGC_thread(),
6294            "Should be CMS thread");
6295   } else {
6296     // We can be the CMS thread only if we are in a stop-world
6297     // phase of CMS collection.
6298     if (t->is_ConcurrentGC_thread()) {
6299       assert(_collectorState == InitialMarking ||
6300              _collectorState == FinalMarking,
6301              "Should be a stop-world phase");
6302       // The CMS thread should be holding the CMS_token.
6303       assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6304              "Potential interference with concurrently "
6305              "executing VM thread");
6306     }
6307   }
6308 }
6309 #endif
6310 
6311 void CMSCollector::sweep(bool asynch) {
6312   assert(_collectorState == Sweeping, "just checking");
6313   check_correct_thread_executing();
6314   verify_work_stacks_empty();
6315   verify_overflow_empty();
6316   increment_sweep_count();
6317   TraceCMSMemoryManagerStats tms(_collectorState, GenCollectedHeap::heap()->gc_cause());
6318 
6319   _inter_sweep_timer.stop();
6320   _inter_sweep_estimate.sample(_inter_sweep_timer.seconds());
6321   size_policy()->avg_cms_free_at_sweep()->sample(_cmsGen->free());
6322 
6323   assert(!_intra_sweep_timer.is_active(), "Should not be active");
6324   _intra_sweep_timer.reset();
6325   _intra_sweep_timer.start();
6326   if (asynch) {
6327     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
6328     CMSPhaseAccounting pa(this, "sweep", !PrintGCDetails);
6329     // First sweep the old gen
6330     {
6331       CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(),
6332                                bitMapLock());
6333       sweepWork(_cmsGen, asynch);
6334     }
6335 
6336     // Update Universe::_heap_*_at_gc figures.
6337     // We need all the free list locks to make the abstract state
6338     // transition from Sweeping to Resetting. See detailed note
6339     // further below.
6340     {
6341       CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock());
6342       // Update heap occupancy information which is used as
6343       // input to soft ref clearing policy at the next gc.
6344       Universe::update_heap_info_at_gc();
6345       _collectorState = Resizing;
6346     }
6347   } else {
6348     // already have needed locks
6349     sweepWork(_cmsGen,  asynch);
6350     // Update heap occupancy information which is used as
6351     // input to soft ref clearing policy at the next gc.
6352     Universe::update_heap_info_at_gc();
6353     _collectorState = Resizing;
6354   }
6355   verify_work_stacks_empty();
6356   verify_overflow_empty();
6357 
6358   if (should_unload_classes()) {
6359     ClassLoaderDataGraph::purge();
6360   }
6361 
6362   _intra_sweep_timer.stop();
6363   _intra_sweep_estimate.sample(_intra_sweep_timer.seconds());
6364 
6365   _inter_sweep_timer.reset();
6366   _inter_sweep_timer.start();
6367 
6368   // We need to use a monotonically non-decreasing time in ms;
6369   // otherwise we will see time-warp warnings. Note that
6370   // os::javaTimeMillis() does not guarantee monotonicity.
6371   jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
6372   update_time_of_last_gc(now);
6373 
6374   // NOTE on abstract state transitions:
6375   // Mutators allocate-live and/or mark the mod-union table dirty
6376   // based on the state of the collection.  The former is done in
6377   // the interval [Marking, Sweeping] and the latter in the interval
6378   // [Marking, Sweeping).  Thus the transitions into the Marking state
6379   // and out of the Sweeping state must be synchronously visible
6380   // globally to the mutators.
6381   // The transition into the Marking state happens with the world
6382   // stopped so the mutators will globally see it.  Sweeping is
6383   // done asynchronously by the background collector so the transition
6384   // from the Sweeping state to the Resizing state must be done
6385   // under the freelistLock (as is the check for whether to
6386   // allocate-live and whether to dirty the mod-union table).
6387   assert(_collectorState == Resizing, "Change of collector state to"
6388     " Resizing must be done under the freelistLocks (plural)");
6389 
6390   // Now that sweeping has been completed, we clear
6391   // the incremental_collection_failed flag,
6392   // thus inviting a younger gen collection to promote into
6393   // this generation. If such a promotion may still fail,
6394   // the flag will be set again when a young collection is
6395   // attempted.
6396   GenCollectedHeap* gch = GenCollectedHeap::heap();
6397   gch->clear_incremental_collection_failed();  // Worth retrying as fresh space may have been freed up
6398   gch->update_full_collections_completed(_collection_count_start);
6399 }
6400 
6401 // FIX ME!!! Looks like this belongs in CFLSpace, with
6402 // CMSGen merely delegating to it.
6403 void ConcurrentMarkSweepGeneration::setNearLargestChunk() {
6404   double nearLargestPercent = FLSLargestBlockCoalesceProximity;
6405   HeapWord*  minAddr        = _cmsSpace->bottom();
6406   HeapWord*  largestAddr    =
6407     (HeapWord*) _cmsSpace->dictionary()->find_largest_dict();
6408   if (largestAddr == NULL) {
6409     // The dictionary appears to be empty.  In this case
6410     // try to coalesce at the end of the heap.
6411     largestAddr = _cmsSpace->end();
6412   }
6413   size_t largestOffset     = pointer_delta(largestAddr, minAddr);
6414   size_t nearLargestOffset =
6415     (size_t)((double)largestOffset * nearLargestPercent) - MinChunkSize;
6416   if (PrintFLSStatistics != 0) {
6417     gclog_or_tty->print_cr(
6418       "CMS: Large Block: " PTR_FORMAT ";"
6419       " Proximity: " PTR_FORMAT " -> " PTR_FORMAT,
6420       largestAddr,
6421       _cmsSpace->nearLargestChunk(), minAddr + nearLargestOffset);
6422   }
6423   _cmsSpace->set_nearLargestChunk(minAddr + nearLargestOffset);
6424 }
6425 
6426 bool ConcurrentMarkSweepGeneration::isNearLargestChunk(HeapWord* addr) {
6427   return addr >= _cmsSpace->nearLargestChunk();
6428 }
6429 
6430 FreeChunk* ConcurrentMarkSweepGeneration::find_chunk_at_end() {
6431   return _cmsSpace->find_chunk_at_end();
6432 }
6433 
6434 void ConcurrentMarkSweepGeneration::update_gc_stats(int current_level,
6435                                                     bool full) {
6436   // The next lower level has been collected.  Gather any statistics
6437   // that are of interest at this point.
6438   if (!full && (current_level + 1) == level()) {
6439     // Gather statistics on the young generation collection.
6440     collector()->stats().record_gc0_end(used());
6441   }
6442 }
6443 
6444 CMSAdaptiveSizePolicy* ConcurrentMarkSweepGeneration::size_policy() {
6445   GenCollectedHeap* gch = GenCollectedHeap::heap();
6446   assert(gch->kind() == CollectedHeap::GenCollectedHeap,
6447     "Wrong type of heap");
6448   CMSAdaptiveSizePolicy* sp = (CMSAdaptiveSizePolicy*)
6449     gch->gen_policy()->size_policy();
6450   assert(sp->is_gc_cms_adaptive_size_policy(),
6451     "Wrong type of size policy");
6452   return sp;
6453 }
6454 
6455 void ConcurrentMarkSweepGeneration::rotate_debug_collection_type() {
6456   if (PrintGCDetails && Verbose) {
6457     gclog_or_tty->print("Rotate from %d ", _debug_collection_type);
6458   }
6459   _debug_collection_type = (CollectionTypes) (_debug_collection_type + 1);
6460   _debug_collection_type =
6461     (CollectionTypes) (_debug_collection_type % Unknown_collection_type);
6462   if (PrintGCDetails && Verbose) {
6463     gclog_or_tty->print_cr("to %d ", _debug_collection_type);
6464   }
6465 }
6466 
6467 void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* gen,
6468   bool asynch) {
6469   // We iterate over the space(s) underlying this generation,
6470   // checking the mark bit map to see if the bits corresponding
6471   // to specific blocks are marked or not. Blocks that are
6472   // marked are live and are not swept up. All remaining blocks
6473   // are swept up, with coalescing on-the-fly as we sweep up
6474   // contiguous free and/or garbage blocks:
6475   // We need to ensure that the sweeper synchronizes with allocators
6476   // and stop-the-world collectors. In particular, the following
6477   // locks are used:
6478   // . CMS token: if this is held, a stop the world collection cannot occur
6479   // . freelistLock: if this is held no allocation can occur from this
6480   //                 generation by another thread
  // . bitMapLock: if this is held, no other thread can access or update
  //               the marking bit map
  //
6483 
  // Note that we need to hold the freelistLock if we use the
  // block iteration below; else the iterator might go awry if
  // a mutator (or promotion) causes block contents to change
  // (for instance if the allocator divvies up a block).
  // If we hold the free list lock, for all practical purposes
  // young generation GCs can't occur (they'll usually need to
  // promote), so we might as well prevent all young generation
  // GCs while we do a sweeping step. For the same reason, we
  // might as well take the bit map lock for the entire duration.
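
  // For illustration, the asynchronous caller above (see sweep())
  // establishes exactly this discipline before delegating here,
  // roughly:
  //
  //   CMSTokenSyncWithLocks ts(true /* is_cms_thread */,
  //                            _cmsGen->freelistLock(), bitMapLock());
  //   sweepWork(_cmsGen, true /* asynch */);
  //
  // i.e. the CMS token first, then the free list lock and the bit
  // map lock, all held for the full duration of the sweep step.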
6493 
6494   // check that we hold the requisite locks
6495   assert(have_cms_token(), "Should hold cms token");
6496   assert(   (asynch && ConcurrentMarkSweepThread::cms_thread_has_cms_token())
6497          || (!asynch && ConcurrentMarkSweepThread::vm_thread_has_cms_token()),
6498         "Should possess CMS token to sweep");
6499   assert_lock_strong(gen->freelistLock());
6500   assert_lock_strong(bitMapLock());
6501 
6502   assert(!_inter_sweep_timer.is_active(), "Was switched off in an outer context");
6503   assert(_intra_sweep_timer.is_active(),  "Was switched on  in an outer context");
6504   gen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()),
6505                                       _inter_sweep_estimate.padded_average(),
6506                                       _intra_sweep_estimate.padded_average());
6507   gen->setNearLargestChunk();
6508 
6509   {
    SweepClosure sweepClosure(this, gen, &_markBitMap,
                              CMSYield && asynch);
6512     gen->cmsSpace()->blk_iterate_careful(&sweepClosure);
6513     // We need to free-up/coalesce garbage/blocks from a
6514     // co-terminal free run. This is done in the SweepClosure
6515     // destructor; so, do not remove this scope, else the
6516     // end-of-sweep-census below will be off by a little bit.
6517   }
6518   gen->cmsSpace()->sweep_completed();
6519   gen->cmsSpace()->endSweepFLCensus(sweep_count());
6520   if (should_unload_classes()) {                // unloaded classes this cycle,
6521     _concurrent_cycles_since_last_unload = 0;   // ... reset count
6522   } else {                                      // did not unload classes,
6523     _concurrent_cycles_since_last_unload++;     // ... increment count
6524   }
6525 }
6526 
6527 // Reset CMS data structures (for now just the marking bit map)
6528 // preparatory for the next cycle.
6529 void CMSCollector::reset(bool asynch) {
6530   GenCollectedHeap* gch = GenCollectedHeap::heap();
6531   CMSAdaptiveSizePolicy* sp = size_policy();
6532   AdaptiveSizePolicyOutput(sp, gch->total_collections());
6533   if (asynch) {
6534     CMSTokenSyncWithLocks ts(true, bitMapLock());
6535 
6536     // If the state is not "Resetting", the foreground  thread
6537     // has done a collection and the resetting.
6538     if (_collectorState != Resetting) {
6539       assert(_collectorState == Idling, "The state should only change"
6540         " because the foreground collector has finished the collection");
6541       return;
6542     }
6543 
6544     // Clear the mark bitmap (no grey objects to start with)
6545     // for the next cycle.
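    // The bit map is cleared in chunks of (at most) CMSBitMapYieldQuantum
    // words so that, between chunks, the CMS thread can give up the bit
    // map lock and the CMS token to a waiting foreground collection; see
    // the yield protocol in the loop below.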
6546     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
6547     CMSPhaseAccounting cmspa(this, "reset", !PrintGCDetails);
6548 
6549     HeapWord* curAddr = _markBitMap.startWord();
6550     while (curAddr < _markBitMap.endWord()) {
6551       size_t remaining  = pointer_delta(_markBitMap.endWord(), curAddr);
6552       MemRegion chunk(curAddr, MIN2(CMSBitMapYieldQuantum, remaining));
6553       _markBitMap.clear_large_range(chunk);
6554       if (ConcurrentMarkSweepThread::should_yield() &&
6555           !foregroundGCIsActive() &&
6556           CMSYield) {
6557         assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6558                "CMS thread should hold CMS token");
6559         assert_lock_strong(bitMapLock());
6560         bitMapLock()->unlock();
6561         ConcurrentMarkSweepThread::desynchronize(true);
6562         ConcurrentMarkSweepThread::acknowledge_yield_request();
6563         stopTimer();
6564         if (PrintCMSStatistics != 0) {
6565           incrementYields();
6566         }
6567         icms_wait();
6568 
6569         // See the comment in coordinator_yield()
6570         for (unsigned i = 0; i < CMSYieldSleepCount &&
6571                          ConcurrentMarkSweepThread::should_yield() &&
6572                          !CMSCollector::foregroundGCIsActive(); ++i) {
6573           os::sleep(Thread::current(), 1, false);
6574           ConcurrentMarkSweepThread::acknowledge_yield_request();
6575         }
6576 
6577         ConcurrentMarkSweepThread::synchronize(true);
6578         bitMapLock()->lock_without_safepoint_check();
6579         startTimer();
6580       }
6581       curAddr = chunk.end();
6582     }
6583     // A successful mostly concurrent collection has been done.
6584     // Because only the full (i.e., concurrent mode failure) collections
6585     // are being measured for gc overhead limits, clean the "near" flag
6586     // and count.
6587     sp->reset_gc_overhead_limit_count();
6588     _collectorState = Idling;
6589   } else {
6590     // already have the lock
6591     assert(_collectorState == Resetting, "just checking");
6592     assert_lock_strong(bitMapLock());
6593     _markBitMap.clear_all();
6594     _collectorState = Idling;
6595   }
6596 
6597   // Stop incremental mode after a cycle completes, so that any future cycles
6598   // are triggered by allocation.
6599   stop_icms();
6600 
6601   NOT_PRODUCT(
6602     if (RotateCMSCollectionTypes) {
6603       _cmsGen->rotate_debug_collection_type();
6604     }
6605   )
6606 
6607   register_gc_end();
6608 }
6609 
6610 void CMSCollector::do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause) {
6611   gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
6612   TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
6613   GCTraceTime t(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL);
6614   TraceCollectorStats tcs(counters());
6615 
6616   switch (op) {
6617     case CMS_op_checkpointRootsInitial: {
6618       SvcGCMarker sgcm(SvcGCMarker::OTHER);
6619       checkpointRootsInitial(true);       // asynch
6620       if (PrintGC) {
6621         _cmsGen->printOccupancy("initial-mark");
6622       }
6623       break;
6624     }
6625     case CMS_op_checkpointRootsFinal: {
6626       SvcGCMarker sgcm(SvcGCMarker::OTHER);
6627       checkpointRootsFinal(true,    // asynch
6628                            false,   // !clear_all_soft_refs
6629                            false);  // !init_mark_was_synchronous
6630       if (PrintGC) {
6631         _cmsGen->printOccupancy("remark");
6632       }
6633       break;
6634     }
6635     default:
6636       fatal("No such CMS_op");
6637   }
6638 }
6639 
6640 #ifndef PRODUCT
6641 size_t const CMSCollector::skip_header_HeapWords() {
6642   return FreeChunk::header_size();
6643 }
6644 
// Try to collect here the conditions that should hold when the
// CMS thread is exiting. The idea is that the foreground GC
6647 // thread should not be blocked if it wants to terminate
6648 // the CMS thread and yet continue to run the VM for a while
6649 // after that.
6650 void CMSCollector::verify_ok_to_terminate() const {
6651   assert(Thread::current()->is_ConcurrentGC_thread(),
6652          "should be called by CMS thread");
6653   assert(!_foregroundGCShouldWait, "should be false");
6654   // We could check here that all the various low-level locks
6655   // are not held by the CMS thread, but that is overkill; see
6656   // also CMSThread::verify_ok_to_terminate() where the CGC_lock
6657   // is checked.
6658 }
6659 #endif
6660 
6661 size_t CMSCollector::block_size_using_printezis_bits(HeapWord* addr) const {
  assert(_markBitMap.isMarked(addr) && _markBitMap.isMarked(addr + 1),
         "missing Printezis mark?");
6664   HeapWord* nextOneAddr = _markBitMap.getNextMarkedWordAddress(addr + 2);
6665   size_t size = pointer_delta(nextOneAddr + 1, addr);
6666   assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
6667          "alignment problem");
6668   assert(size >= 3, "Necessary for Printezis marks to work");
6669   return size;
6670 }
6671 
6672 // A variant of the above (block_size_using_printezis_bits()) except
6673 // that we return 0 if the P-bits are not yet set.
6674 size_t CMSCollector::block_size_if_printezis_bits(HeapWord* addr) const {
6675   if (_markBitMap.isMarked(addr + 1)) {
6676     assert(_markBitMap.isMarked(addr), "P-bit can be set only for marked objects");
6677     HeapWord* nextOneAddr = _markBitMap.getNextMarkedWordAddress(addr + 2);
6678     size_t size = pointer_delta(nextOneAddr + 1, addr);
6679     assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
6680            "alignment problem");
6681     assert(size >= 3, "Necessary for Printezis marks to work");
6682     return size;
6683   }
6684   return 0;
6685 }
6686 
6687 HeapWord* CMSCollector::next_card_start_after_block(HeapWord* addr) const {
6688   size_t sz = 0;
6689   oop p = (oop)addr;
6690   if (p->klass_or_null() != NULL) {
6691     sz = CompactibleFreeListSpace::adjustObjectSize(p->size());
6692   } else {
6693     sz = block_size_using_printezis_bits(addr);
6694   }
6695   assert(sz > 0, "size must be nonzero");
6696   HeapWord* next_block = addr + sz;
6697   HeapWord* next_card  = (HeapWord*)round_to((uintptr_t)next_block,
6698                                              CardTableModRefBS::card_size);
6699   assert(round_down((uintptr_t)addr,      CardTableModRefBS::card_size) <
6700          round_down((uintptr_t)next_card, CardTableModRefBS::card_size),
6701          "must be different cards");
6702   return next_card;
6703 }
6704 
6705 
6706 // CMS Bit Map Wrapper /////////////////////////////////////////
6707 
// Construct a CMS bit map infrastructure, but don't create the
// bit vector itself. That is done by a separate call to
// CMSBitMap::allocate(), further below.
6711 CMSBitMap::CMSBitMap(int shifter, int mutex_rank, const char* mutex_name):
6712   _bm(),
6713   _shifter(shifter),
6714   _lock(mutex_rank >= 0 ? new Mutex(mutex_rank, mutex_name, true) : NULL)
6715 {
6716   _bmStartWord = 0;
6717   _bmWordSize  = 0;
6718 }
6719 
6720 bool CMSBitMap::allocate(MemRegion mr) {
6721   _bmStartWord = mr.start();
6722   _bmWordSize  = mr.word_size();
6723   ReservedSpace brs(ReservedSpace::allocation_align_size_up(
6724                      (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1));
6725   if (!brs.is_reserved()) {
6726     warning("CMS bit map allocation failure");
6727     return false;
6728   }
  // For now we'll just commit all of the bit map up front.
6730   // Later on we'll try to be more parsimonious with swap.
6731   if (!_virtual_space.initialize(brs, brs.size())) {
6732     warning("CMS bit map backing store failure");
6733     return false;
6734   }
6735   assert(_virtual_space.committed_size() == brs.size(),
6736          "didn't reserve backing store for all of CMS bit map?");
6737   _bm.set_map((BitMap::bm_word_t*)_virtual_space.low());
6738   assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >=
6739          _bmWordSize, "inconsistency in bit map sizing");
6740   _bm.set_size(_bmWordSize >> _shifter);
6741 
6742   // bm.clear(); // can we rely on getting zero'd memory? verify below
6743   assert(isAllClear(),
6744          "Expected zero'd memory from ReservedSpace constructor");
6745   assert(_bm.size() == heapWordDiffToOffsetDiff(sizeInWords()),
6746          "consistency check");
6747   return true;
6748 }
6749 
6750 void CMSBitMap::dirty_range_iterate_clear(MemRegion mr, MemRegionClosure* cl) {
6751   HeapWord *next_addr, *end_addr, *last_addr;
6752   assert_locked();
6753   assert(covers(mr), "out-of-range error");
6754   // XXX assert that start and end are appropriately aligned
6755   for (next_addr = mr.start(), end_addr = mr.end();
6756        next_addr < end_addr; next_addr = last_addr) {
6757     MemRegion dirty_region = getAndClearMarkedRegion(next_addr, end_addr);
6758     last_addr = dirty_region.end();
6759     if (!dirty_region.is_empty()) {
6760       cl->do_MemRegion(dirty_region);
6761     } else {
6762       assert(last_addr == end_addr, "program logic");
6763       return;
6764     }
6765   }
6766 }
6767 
6768 void CMSBitMap::print_on_error(outputStream* st, const char* prefix) const {
6769   _bm.print_on_error(st, prefix);
6770 }
6771 
6772 #ifndef PRODUCT
6773 void CMSBitMap::assert_locked() const {
6774   CMSLockVerifier::assert_locked(lock());
6775 }
6776 
6777 bool CMSBitMap::covers(MemRegion mr) const {
6778   // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
6779   assert((size_t)_bm.size() == (_bmWordSize >> _shifter),
6780          "size inconsistency");
6781   return (mr.start() >= _bmStartWord) &&
6782          (mr.end()   <= endWord());
6783 }
6784 
6785 bool CMSBitMap::covers(HeapWord* start, size_t size) const {
  return (start >= _bmStartWord && (start + size) <= endWord());
6787 }
6788 
6789 void CMSBitMap::verifyNoOneBitsInRange(HeapWord* left, HeapWord* right) {
6790   // verify that there are no 1 bits in the interval [left, right)
6791   FalseBitMapClosure falseBitMapClosure;
6792   iterate(&falseBitMapClosure, left, right);
6793 }
6794 
6795 void CMSBitMap::region_invariant(MemRegion mr)
6796 {
6797   assert_locked();
6798   // mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
6799   assert(!mr.is_empty(), "unexpected empty region");
6800   assert(covers(mr), "mr should be covered by bit map");
6801   // convert address range into offset range
6802   size_t start_ofs = heapWordToOffset(mr.start());
6803   // Make sure that end() is appropriately aligned
6804   assert(mr.end() == (HeapWord*)round_to((intptr_t)mr.end(),
6805                         (1 << (_shifter+LogHeapWordSize))),
6806          "Misaligned mr.end()");
6807   size_t end_ofs   = heapWordToOffset(mr.end());
6808   assert(end_ofs > start_ofs, "Should mark at least one bit");
6809 }
6810 
6811 #endif
6812 
6813 bool CMSMarkStack::allocate(size_t size) {
6814   // allocate a stack of the requisite depth
6815   ReservedSpace rs(ReservedSpace::allocation_align_size_up(
6816                    size * sizeof(oop)));
6817   if (!rs.is_reserved()) {
6818     warning("CMSMarkStack allocation failure");
6819     return false;
6820   }
6821   if (!_virtual_space.initialize(rs, rs.size())) {
6822     warning("CMSMarkStack backing store failure");
6823     return false;
6824   }
6825   assert(_virtual_space.committed_size() == rs.size(),
6826          "didn't reserve backing store for all of CMS stack?");
6827   _base = (oop*)(_virtual_space.low());
6828   _index = 0;
6829   _capacity = size;
6830   NOT_PRODUCT(_max_depth = 0);
6831   return true;
6832 }
6833 
// XXX FIX ME !!! In the MT case we come in here holding a
// leaf lock. For printing we need to take a further lock
// which has lower rank. We need to recalibrate the two
// lock-ranks involved in order to be able to print the
// messages below. (Or defer the printing to the caller.
// For now we take the expedient path of just disabling the
// messages for the problematic case.)
6841 void CMSMarkStack::expand() {
6842   assert(_capacity <= MarkStackSizeMax, "stack bigger than permitted");
6843   if (_capacity == MarkStackSizeMax) {
6844     if (_hit_limit++ == 0 && !CMSConcurrentMTEnabled && PrintGCDetails) {
6845       // We print a warning message only once per CMS cycle.
6846       gclog_or_tty->print_cr(" (benign) Hit CMSMarkStack max size limit");
6847     }
6848     return;
6849   }
6850   // Double capacity if possible
6851   size_t new_capacity = MIN2(_capacity*2, MarkStackSizeMax);
  // Do not give up the existing stack until we have managed to
  // get the doubled capacity that we desire.
6854   ReservedSpace rs(ReservedSpace::allocation_align_size_up(
6855                    new_capacity * sizeof(oop)));
6856   if (rs.is_reserved()) {
6857     // Release the backing store associated with old stack
6858     _virtual_space.release();
6859     // Reinitialize virtual space for new stack
6860     if (!_virtual_space.initialize(rs, rs.size())) {
6861       fatal("Not enough swap for expanded marking stack");
6862     }
6863     _base = (oop*)(_virtual_space.low());
6864     _index = 0;
6865     _capacity = new_capacity;
6866   } else if (_failed_double++ == 0 && !CMSConcurrentMTEnabled && PrintGCDetails) {
6867     // Failed to double capacity, continue;
6868     // we print a detail message only once per CMS cycle.
6869     gclog_or_tty->print(" (benign) Failed to expand marking stack from "SIZE_FORMAT"K to "
6870             SIZE_FORMAT"K",
6871             _capacity / K, new_capacity / K);
6872   }
6873 }
6874 
6875 
6876 // Closures
// XXX: there seems to be a lot of code duplication here;
6878 // should refactor and consolidate common code.
6879 
6880 // This closure is used to mark refs into the CMS generation in
6881 // the CMS bit map. Called at the first checkpoint. This closure
6882 // assumes that we do not need to re-mark dirty cards; if the CMS
6883 // generation on which this is used is not an oldest
6884 // generation then this will lose younger_gen cards!
6885 
6886 MarkRefsIntoClosure::MarkRefsIntoClosure(
6887   MemRegion span, CMSBitMap* bitMap):
6888     _span(span),
6889     _bitMap(bitMap)
6890 {
6891     assert(_ref_processor == NULL, "deliberately left NULL");
6892     assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
6893 }
6894 
6895 void MarkRefsIntoClosure::do_oop(oop obj) {
  // if obj points into _span, then mark the corresponding bit in _bitMap
6897   assert(obj->is_oop(), "expected an oop");
6898   HeapWord* addr = (HeapWord*)obj;
6899   if (_span.contains(addr)) {
6900     // this should be made more efficient
6901     _bitMap->mark(addr);
6902   }
6903 }
6904 
6905 void MarkRefsIntoClosure::do_oop(oop* p)       { MarkRefsIntoClosure::do_oop_work(p); }
6906 void MarkRefsIntoClosure::do_oop(narrowOop* p) { MarkRefsIntoClosure::do_oop_work(p); }
6907 
6908 Par_MarkRefsIntoClosure::Par_MarkRefsIntoClosure(
6909   MemRegion span, CMSBitMap* bitMap):
6910     _span(span),
6911     _bitMap(bitMap)
6912 {
6913     assert(_ref_processor == NULL, "deliberately left NULL");
6914     assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
6915 }
6916 
6917 void Par_MarkRefsIntoClosure::do_oop(oop obj) {
  // if obj points into _span, then mark the corresponding bit in _bitMap
6919   assert(obj->is_oop(), "expected an oop");
6920   HeapWord* addr = (HeapWord*)obj;
6921   if (_span.contains(addr)) {
6922     // this should be made more efficient
6923     _bitMap->par_mark(addr);
6924   }
6925 }
6926 
6927 void Par_MarkRefsIntoClosure::do_oop(oop* p)       { Par_MarkRefsIntoClosure::do_oop_work(p); }
6928 void Par_MarkRefsIntoClosure::do_oop(narrowOop* p) { Par_MarkRefsIntoClosure::do_oop_work(p); }
6929 
6930 // A variant of the above, used for CMS marking verification.
6931 MarkRefsIntoVerifyClosure::MarkRefsIntoVerifyClosure(
6932   MemRegion span, CMSBitMap* verification_bm, CMSBitMap* cms_bm):
6933     _span(span),
6934     _verification_bm(verification_bm),
6935     _cms_bm(cms_bm)
6936 {
6937     assert(_ref_processor == NULL, "deliberately left NULL");
6938     assert(_verification_bm->covers(_span), "_verification_bm/_span mismatch");
6939 }
6940 
6941 void MarkRefsIntoVerifyClosure::do_oop(oop obj) {
  // if obj points into _span, then mark the corresponding bit in _verification_bm
6943   assert(obj->is_oop(), "expected an oop");
6944   HeapWord* addr = (HeapWord*)obj;
6945   if (_span.contains(addr)) {
6946     _verification_bm->mark(addr);
6947     if (!_cms_bm->isMarked(addr)) {
6948       oop(addr)->print();
6949       gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)", addr);
6950       fatal("... aborting");
6951     }
6952   }
6953 }
6954 
6955 void MarkRefsIntoVerifyClosure::do_oop(oop* p)       { MarkRefsIntoVerifyClosure::do_oop_work(p); }
6956 void MarkRefsIntoVerifyClosure::do_oop(narrowOop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); }
6957 
6958 //////////////////////////////////////////////////
6959 // MarkRefsIntoAndScanClosure
6960 //////////////////////////////////////////////////
6961 
6962 MarkRefsIntoAndScanClosure::MarkRefsIntoAndScanClosure(MemRegion span,
6963                                                        ReferenceProcessor* rp,
6964                                                        CMSBitMap* bit_map,
6965                                                        CMSBitMap* mod_union_table,
6966                                                        CMSMarkStack*  mark_stack,
6967                                                        CMSCollector* collector,
6968                                                        bool should_yield,
6969                                                        bool concurrent_precleaning):
6970   _collector(collector),
6971   _span(span),
6972   _bit_map(bit_map),
6973   _mark_stack(mark_stack),
6974   _pushAndMarkClosure(collector, span, rp, bit_map, mod_union_table,
6975                       mark_stack, concurrent_precleaning),
6976   _yield(should_yield),
6977   _concurrent_precleaning(concurrent_precleaning),
6978   _freelistLock(NULL)
6979 {
6980   _ref_processor = rp;
6981   assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
6982 }
6983 
6984 // This closure is used to mark refs into the CMS generation at the
6985 // second (final) checkpoint, and to scan and transitively follow
6986 // the unmarked oops. It is also used during the concurrent precleaning
6987 // phase while scanning objects on dirty cards in the CMS generation.
6988 // The marks are made in the marking bit map and the marking stack is
6989 // used for keeping the (newly) grey objects during the scan.
6990 // The parallel version (Par_...) appears further below.
6991 void MarkRefsIntoAndScanClosure::do_oop(oop obj) {
6992   if (obj != NULL) {
6993     assert(obj->is_oop(), "expected an oop");
6994     HeapWord* addr = (HeapWord*)obj;
6995     assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
6996     assert(_collector->overflow_list_is_empty(),
6997            "overflow list should be empty");
6998     if (_span.contains(addr) &&
6999         !_bit_map->isMarked(addr)) {
7000       // mark bit map (object is now grey)
7001       _bit_map->mark(addr);
7002       // push on marking stack (stack should be empty), and drain the
7003       // stack by applying this closure to the oops in the oops popped
7004       // from the stack (i.e. blacken the grey objects)
7005       bool res = _mark_stack->push(obj);
7006       assert(res, "Should have space to push on empty stack");
7007       do {
7008         oop new_oop = _mark_stack->pop();
7009         assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
7010         assert(_bit_map->isMarked((HeapWord*)new_oop),
7011                "only grey objects on this stack");
7012         // iterate over the oops in this oop, marking and pushing
7013         // the ones in CMS heap (i.e. in _span).
7014         new_oop->oop_iterate(&_pushAndMarkClosure);
7015         // check if it's time to yield
7016         do_yield_check();
7017       } while (!_mark_stack->isEmpty() ||
7018                (!_concurrent_precleaning && take_from_overflow_list()));
7019         // if marking stack is empty, and we are not doing this
7020         // during precleaning, then check the overflow list
7021     }
7022     assert(_mark_stack->isEmpty(), "post-condition (eager drainage)");
7023     assert(_collector->overflow_list_is_empty(),
7024            "overflow list was drained above");
7025     // We could restore evacuated mark words, if any, used for
7026     // overflow list links here because the overflow list is
7027     // provably empty here. That would reduce the maximum
7028     // size requirements for preserved_{oop,mark}_stack.
7029     // But we'll just postpone it until we are all done
7030     // so we can just stream through.
7031     if (!_concurrent_precleaning && CMSOverflowEarlyRestoration) {
7032       _collector->restore_preserved_marks_if_any();
7033       assert(_collector->no_preserved_marks(), "No preserved marks");
7034     }
7035     assert(!CMSOverflowEarlyRestoration || _collector->no_preserved_marks(),
7036            "All preserved marks should have been restored above");
7037   }
7038 }
7039 
7040 void MarkRefsIntoAndScanClosure::do_oop(oop* p)       { MarkRefsIntoAndScanClosure::do_oop_work(p); }
7041 void MarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
7042 
7043 void MarkRefsIntoAndScanClosure::do_yield_work() {
7044   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
7045          "CMS thread should hold CMS token");
7046   assert_lock_strong(_freelistLock);
7047   assert_lock_strong(_bit_map->lock());
  // relinquish the _freelistLock and the bit map lock
7049   _bit_map->lock()->unlock();
7050   _freelistLock->unlock();
7051   ConcurrentMarkSweepThread::desynchronize(true);
7052   ConcurrentMarkSweepThread::acknowledge_yield_request();
7053   _collector->stopTimer();
7054   GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
7055   if (PrintCMSStatistics != 0) {
7056     _collector->incrementYields();
7057   }
7058   _collector->icms_wait();
7059 
7060   // See the comment in coordinator_yield()
7061   for (unsigned i = 0;
7062        i < CMSYieldSleepCount &&
7063        ConcurrentMarkSweepThread::should_yield() &&
7064        !CMSCollector::foregroundGCIsActive();
7065        ++i) {
7066     os::sleep(Thread::current(), 1, false);
7067     ConcurrentMarkSweepThread::acknowledge_yield_request();
7068   }
7069 
7070   ConcurrentMarkSweepThread::synchronize(true);
7071   _freelistLock->lock_without_safepoint_check();
7072   _bit_map->lock()->lock_without_safepoint_check();
7073   _collector->startTimer();
7074 }
7075 
7076 ///////////////////////////////////////////////////////////
7077 // Par_MarkRefsIntoAndScanClosure: a parallel version of
7078 //                                 MarkRefsIntoAndScanClosure
7079 ///////////////////////////////////////////////////////////
7080 Par_MarkRefsIntoAndScanClosure::Par_MarkRefsIntoAndScanClosure(
7081   CMSCollector* collector, MemRegion span, ReferenceProcessor* rp,
7082   CMSBitMap* bit_map, OopTaskQueue* work_queue):
7083   _span(span),
7084   _bit_map(bit_map),
7085   _work_queue(work_queue),
7086   _low_water_mark(MIN2((uint)(work_queue->max_elems()/4),
7087                        (uint)(CMSWorkQueueDrainThreshold * ParallelGCThreads))),
7088   _par_pushAndMarkClosure(collector, span, rp, bit_map, work_queue)
7089 {
7090   _ref_processor = rp;
7091   assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
7092 }
7093 
7094 // This closure is used to mark refs into the CMS generation at the
7095 // second (final) checkpoint, and to scan and transitively follow
7096 // the unmarked oops. The marks are made in the marking bit map and
7097 // the work_queue is used for keeping the (newly) grey objects during
7098 // the scan phase whence they are also available for stealing by parallel
7099 // threads. Since the marking bit map is shared, updates are
7100 // synchronized (via CAS).
7101 void Par_MarkRefsIntoAndScanClosure::do_oop(oop obj) {
7102   if (obj != NULL) {
7103     // Ignore mark word because this could be an already marked oop
7104     // that may be chained at the end of the overflow list.
7105     assert(obj->is_oop(true), "expected an oop");
7106     HeapWord* addr = (HeapWord*)obj;
7107     if (_span.contains(addr) &&
7108         !_bit_map->isMarked(addr)) {
7109       // mark bit map (object will become grey):
7110       // It is possible for several threads to be
7111       // trying to "claim" this object concurrently;
7112       // the unique thread that succeeds in marking the
7113       // object first will do the subsequent push on
7114       // to the work queue (or overflow list).
7115       if (_bit_map->par_mark(addr)) {
7116         // push on work_queue (which may not be empty), and trim the
7117         // queue to an appropriate length by applying this closure to
7118         // the oops in the oops popped from the stack (i.e. blacken the
7119         // grey objects)
7120         bool res = _work_queue->push(obj);
7121         assert(res, "Low water mark should be less than capacity?");
7122         trim_queue(_low_water_mark);
7123       } // Else, another thread claimed the object
7124     }
7125   }
7126 }
7127 
7128 void Par_MarkRefsIntoAndScanClosure::do_oop(oop* p)       { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
7129 void Par_MarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
7130 
7131 // This closure is used to rescan the marked objects on the dirty cards
7132 // in the mod union table and the card table proper.
7133 size_t ScanMarkedObjectsAgainCarefullyClosure::do_object_careful_m(
7134   oop p, MemRegion mr) {
7135 
7136   size_t size = 0;
7137   HeapWord* addr = (HeapWord*)p;
7138   DEBUG_ONLY(_collector->verify_work_stacks_empty();)
7139   assert(_span.contains(addr), "we are scanning the CMS generation");
7140   // check if it's time to yield
7141   if (do_yield_check()) {
7142     // We yielded for some foreground stop-world work,
7143     // and we have been asked to abort this ongoing preclean cycle.
7144     return 0;
7145   }
7146   if (_bitMap->isMarked(addr)) {
7147     // it's marked; is it potentially uninitialized?
7148     if (p->klass_or_null() != NULL) {
7149         // an initialized object; ignore mark word in verification below
7150         // since we are running concurrent with mutators
7151         assert(p->is_oop(true), "should be an oop");
7152         if (p->is_objArray()) {
7153           // objArrays are precisely marked; restrict scanning
7154           // to dirty cards only.
7155           size = CompactibleFreeListSpace::adjustObjectSize(
7156                    p->oop_iterate(_scanningClosure, mr));
7157         } else {
          // A non-array may have been imprecisely marked; we need
          // to scan the object in its entirety.
7160           size = CompactibleFreeListSpace::adjustObjectSize(
7161                    p->oop_iterate(_scanningClosure));
7162         }
7163         #ifdef ASSERT
7164           size_t direct_size =
7165             CompactibleFreeListSpace::adjustObjectSize(p->size());
7166           assert(size == direct_size, "Inconsistency in size");
7167           assert(size >= 3, "Necessary for Printezis marks to work");
7168           if (!_bitMap->isMarked(addr+1)) {
7169             _bitMap->verifyNoOneBitsInRange(addr+2, addr+size);
7170           } else {
7171             _bitMap->verifyNoOneBitsInRange(addr+2, addr+size-1);
7172             assert(_bitMap->isMarked(addr+size-1),
7173                    "inconsistent Printezis mark");
7174           }
7175         #endif // ASSERT
7176     } else {
      // an uninitialized object
7178       assert(_bitMap->isMarked(addr+1), "missing Printezis mark?");
7179       HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
7180       size = pointer_delta(nextOneAddr + 1, addr);
7181       assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
7182              "alignment problem");
7183       // Note that pre-cleaning needn't redirty the card. OopDesc::set_klass()
7184       // will dirty the card when the klass pointer is installed in the
7185       // object (signalling the completion of initialization).
7186     }
7187   } else {
7188     // Either a not yet marked object or an uninitialized object
7189     if (p->klass_or_null() == NULL) {
7190       // An uninitialized object, skip to the next card, since
7191       // we may not be able to read its P-bits yet.
7192       assert(size == 0, "Initial value");
7193     } else {
7194       // An object not (yet) reached by marking: we merely need to
7195       // compute its size so as to go look at the next block.
7196       assert(p->is_oop(true), "should be an oop");
7197       size = CompactibleFreeListSpace::adjustObjectSize(p->size());
7198     }
7199   }
7200   DEBUG_ONLY(_collector->verify_work_stacks_empty();)
7201   return size;
7202 }
7203 
7204 void ScanMarkedObjectsAgainCarefullyClosure::do_yield_work() {
7205   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
7206          "CMS thread should hold CMS token");
7207   assert_lock_strong(_freelistLock);
7208   assert_lock_strong(_bitMap->lock());
  // relinquish the _freelistLock and the bit map lock
7210   _bitMap->lock()->unlock();
7211   _freelistLock->unlock();
7212   ConcurrentMarkSweepThread::desynchronize(true);
7213   ConcurrentMarkSweepThread::acknowledge_yield_request();
7214   _collector->stopTimer();
7215   GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
7216   if (PrintCMSStatistics != 0) {
7217     _collector->incrementYields();
7218   }
7219   _collector->icms_wait();
7220 
7221   // See the comment in coordinator_yield()
7222   for (unsigned i = 0; i < CMSYieldSleepCount &&
7223                    ConcurrentMarkSweepThread::should_yield() &&
7224                    !CMSCollector::foregroundGCIsActive(); ++i) {
7225     os::sleep(Thread::current(), 1, false);
7226     ConcurrentMarkSweepThread::acknowledge_yield_request();
7227   }
7228 
7229   ConcurrentMarkSweepThread::synchronize(true);
7230   _freelistLock->lock_without_safepoint_check();
7231   _bitMap->lock()->lock_without_safepoint_check();
7232   _collector->startTimer();
7233 }
7234 
7235 
7236 //////////////////////////////////////////////////////////////////
7237 // SurvivorSpacePrecleanClosure
7238 //////////////////////////////////////////////////////////////////
7239 // This (single-threaded) closure is used to preclean the oops in
7240 // the survivor spaces.
7241 size_t SurvivorSpacePrecleanClosure::do_object_careful(oop p) {
7242 
7243   HeapWord* addr = (HeapWord*)p;
7244   DEBUG_ONLY(_collector->verify_work_stacks_empty();)
7245   assert(!_span.contains(addr), "we are scanning the survivor spaces");
7246   assert(p->klass_or_null() != NULL, "object should be initializd");
7247   // an initialized object; ignore mark word in verification below
7248   // since we are running concurrent with mutators
7249   assert(p->is_oop(true), "should be an oop");
7250   // Note that we do not yield while we iterate over
7251   // the interior oops of p, pushing the relevant ones
7252   // on our marking stack.
7253   size_t size = p->oop_iterate(_scanning_closure);
7254   do_yield_check();
7255   // Observe that below, we do not abandon the preclean
7256   // phase as soon as we should; rather we empty the
7257   // marking stack before returning. This is to satisfy
7258   // some existing assertions. In general, it may be a
7259   // good idea to abort immediately and complete the marking
7260   // from the grey objects at a later time.
7261   while (!_mark_stack->isEmpty()) {
7262     oop new_oop = _mark_stack->pop();
7263     assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
7264     assert(_bit_map->isMarked((HeapWord*)new_oop),
7265            "only grey objects on this stack");
7266     // iterate over the oops in this oop, marking and pushing
7267     // the ones in CMS heap (i.e. in _span).
7268     new_oop->oop_iterate(_scanning_closure);
7269     // check if it's time to yield
7270     do_yield_check();
7271   }
7272   unsigned int after_count =
7273     GenCollectedHeap::heap()->total_collections();
7274   bool abort = (_before_count != after_count) ||
7275                _collector->should_abort_preclean();
7276   return abort ? 0 : size;
7277 }
7278 
7279 void SurvivorSpacePrecleanClosure::do_yield_work() {
7280   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
7281          "CMS thread should hold CMS token");
7282   assert_lock_strong(_bit_map->lock());
7283   // Relinquish the bit map lock
7284   _bit_map->lock()->unlock();
7285   ConcurrentMarkSweepThread::desynchronize(true);
7286   ConcurrentMarkSweepThread::acknowledge_yield_request();
7287   _collector->stopTimer();
7288   GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
7289   if (PrintCMSStatistics != 0) {
7290     _collector->incrementYields();
7291   }
7292   _collector->icms_wait();
7293 
7294   // See the comment in coordinator_yield()
7295   for (unsigned i = 0; i < CMSYieldSleepCount &&
7296                        ConcurrentMarkSweepThread::should_yield() &&
7297                        !CMSCollector::foregroundGCIsActive(); ++i) {
7298     os::sleep(Thread::current(), 1, false);
7299     ConcurrentMarkSweepThread::acknowledge_yield_request();
7300   }
7301 
7302   ConcurrentMarkSweepThread::synchronize(true);
7303   _bit_map->lock()->lock_without_safepoint_check();
7304   _collector->startTimer();
7305 }
7306 
7307 // This closure is used to rescan the marked objects on the dirty cards
7308 // in the mod union table and the card table proper. In the parallel
7309 // case, although the bitMap is shared, we do a single read so the
7310 // isMarked() query is "safe".
7311 bool ScanMarkedObjectsAgainClosure::do_object_bm(oop p, MemRegion mr) {
7312   // Ignore mark word because we are running concurrent with mutators
7313   assert(p->is_oop_or_null(true), "expected an oop or null");
7314   HeapWord* addr = (HeapWord*)p;
7315   assert(_span.contains(addr), "we are scanning the CMS generation");
7316   bool is_obj_array = false;
7317   #ifdef ASSERT
7318     if (!_parallel) {
7319       assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
7320       assert(_collector->overflow_list_is_empty(),
7321              "overflow list should be empty");
7322 
7323     }
7324   #endif // ASSERT
7325   if (_bit_map->isMarked(addr)) {
7326     // Obj arrays are precisely marked, non-arrays are not;
7327     // so we scan objArrays precisely and non-arrays in their
7328     // entirety.
7329     if (p->is_objArray()) {
7330       is_obj_array = true;
7331       if (_parallel) {
7332         p->oop_iterate(_par_scan_closure, mr);
7333       } else {
7334         p->oop_iterate(_scan_closure, mr);
7335       }
7336     } else {
7337       if (_parallel) {
7338         p->oop_iterate(_par_scan_closure);
7339       } else {
7340         p->oop_iterate(_scan_closure);
7341       }
7342     }
7343   }
7344   #ifdef ASSERT
7345     if (!_parallel) {
7346       assert(_mark_stack->isEmpty(), "post-condition (eager drainage)");
7347       assert(_collector->overflow_list_is_empty(),
7348              "overflow list should be empty");
7349 
7350     }
7351   #endif // ASSERT
7352   return is_obj_array;
7353 }
7354 
7355 MarkFromRootsClosure::MarkFromRootsClosure(CMSCollector* collector,
7356                         MemRegion span,
7357                         CMSBitMap* bitMap, CMSMarkStack*  markStack,
7358                         bool should_yield, bool verifying):
7359   _collector(collector),
7360   _span(span),
7361   _bitMap(bitMap),
7362   _mut(&collector->_modUnionTable),
7363   _markStack(markStack),
7364   _yield(should_yield),
7365   _skipBits(0)
7366 {
7367   assert(_markStack->isEmpty(), "stack should be empty");
7368   _finger = _bitMap->startWord();
7369   _threshold = _finger;
7370   assert(_collector->_restart_addr == NULL, "Sanity check");
7371   assert(_span.contains(_finger), "Out of bounds _finger?");
7372   DEBUG_ONLY(_verifying = verifying;)
7373 }
7374 
7375 void MarkFromRootsClosure::reset(HeapWord* addr) {
7376   assert(_markStack->isEmpty(), "would cause duplicates on stack");
7377   assert(_span.contains(addr), "Out of bounds _finger?");
7378   _finger = addr;
7379   _threshold = (HeapWord*)round_to(
7380                  (intptr_t)_finger, CardTableModRefBS::card_size);
7381 }
7382 
7383 // Should revisit to see if this should be restructured for
7384 // greater efficiency.
7385 bool MarkFromRootsClosure::do_bit(size_t offset) {
7386   if (_skipBits > 0) {
7387     _skipBits--;
7388     return true;
7389   }
7390   // convert offset into a HeapWord*
7391   HeapWord* addr = _bitMap->startWord() + offset;
7392   assert(_bitMap->endWord() && addr < _bitMap->endWord(),
7393          "address out of range");
7394   assert(_bitMap->isMarked(addr), "tautology");
7395   if (_bitMap->isMarked(addr+1)) {
7396     // this is an allocated but not yet initialized object
7397     assert(_skipBits == 0, "tautology");
7398     _skipBits = 2;  // skip next two marked bits ("Printezis-marks")
7399     oop p = oop(addr);
7400     if (p->klass_or_null() == NULL) {
7401       DEBUG_ONLY(if (!_verifying) {)
7402         // We re-dirty the cards on which this object lies and increase
7403         // the _threshold so that we'll come back to scan this object
7404         // during the preclean or remark phase. (CMSCleanOnEnter)
7405         if (CMSCleanOnEnter) {
7406           size_t sz = _collector->block_size_using_printezis_bits(addr);
7407           HeapWord* end_card_addr   = (HeapWord*)round_to(
7408                                          (intptr_t)(addr+sz), CardTableModRefBS::card_size);
7409           MemRegion redirty_range = MemRegion(addr, end_card_addr);
7410           assert(!redirty_range.is_empty(), "Arithmetical tautology");
7411           // Bump _threshold to end_card_addr; note that
7412           // _threshold cannot possibly exceed end_card_addr, anyhow.
7413           // This prevents future clearing of the card as the scan proceeds
7414           // to the right.
7415           assert(_threshold <= end_card_addr,
7416                  "Because we are just scanning into this object");
7417           if (_threshold < end_card_addr) {
7418             _threshold = end_card_addr;
7419           }
7420           if (p->klass_or_null() != NULL) {
7421             // Redirty the range of cards...
7422             _mut->mark_range(redirty_range);
7423           } // ...else the setting of klass will dirty the card anyway.
7424         }
7425       DEBUG_ONLY(})
7426       return true;
7427     }
7428   }
7429   scanOopsInOop(addr);
7430   return true;
7431 }
7432 
7433 // We take a break if we've been at this for a while,
7434 // so as to avoid monopolizing the locks involved.
7435 void MarkFromRootsClosure::do_yield_work() {
7436   // First give up the locks, then yield, then re-lock
7437   // We should probably use a constructor/destructor idiom to
7438   // do this unlock/lock or modify the MutexUnlocker class to
7439   // serve our purpose. XXX
7440   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
7441          "CMS thread should hold CMS token");
7442   assert_lock_strong(_bitMap->lock());
7443   _bitMap->lock()->unlock();
7444   ConcurrentMarkSweepThread::desynchronize(true);
7445   ConcurrentMarkSweepThread::acknowledge_yield_request();
7446   _collector->stopTimer();
7447   GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
7448   if (PrintCMSStatistics != 0) {
7449     _collector->incrementYields();
7450   }
7451   _collector->icms_wait();
7452 
7453   // See the comment in coordinator_yield()
7454   for (unsigned i = 0; i < CMSYieldSleepCount &&
7455                        ConcurrentMarkSweepThread::should_yield() &&
7456                        !CMSCollector::foregroundGCIsActive(); ++i) {
7457     os::sleep(Thread::current(), 1, false);
7458     ConcurrentMarkSweepThread::acknowledge_yield_request();
7459   }
7460 
7461   ConcurrentMarkSweepThread::synchronize(true);
7462   _bitMap->lock()->lock_without_safepoint_check();
7463   _collector->startTimer();
7464 }
7465 
7466 void MarkFromRootsClosure::scanOopsInOop(HeapWord* ptr) {
7467   assert(_bitMap->isMarked(ptr), "expected bit to be set");
7468   assert(_markStack->isEmpty(),
7469          "should drain stack to limit stack usage");
7470   // convert ptr to an oop preparatory to scanning
7471   oop obj = oop(ptr);
7472   // Ignore mark word in verification below, since we
7473   // may be running concurrent with mutators.
7474   assert(obj->is_oop(true), "should be an oop");
7475   assert(_finger <= ptr, "_finger runneth ahead");
7476   // advance the finger to right end of this object
7477   _finger = ptr + obj->size();
7478   assert(_finger > ptr, "we just incremented it above");
7479   // On large heaps, it may take us some time to get through
7480   // the marking phase (especially if running iCMS). During
7481   // this time it's possible that a lot of mutations have
7482   // accumulated in the card table and the mod union table --
7483   // these mutation records are redundant until we have
7484   // actually traced into the corresponding card.
7485   // Here, we check whether advancing the finger would make
7486   // us cross into a new card, and if so clear corresponding
7487   // cards in the MUT (preclean them in the card-table in the
7488   // future).
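
  // Illustrative example (assuming 512-byte cards and 8-byte heap
  // words): if _threshold sits at a card boundary, say word offset 64,
  // and tracing this object advances _finger to word offset 150, the
  // round_to() below moves _threshold up to word offset 192 (the next
  // card boundary) and the cards covering words [64, 192) are cleared
  // in the mod union table.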
7489 
7490   DEBUG_ONLY(if (!_verifying) {)
7491     // The clean-on-enter optimization is disabled by default,
7492     // until we fix 6178663.
7493     if (CMSCleanOnEnter && (_finger > _threshold)) {
7494       // [_threshold, _finger) represents the interval
      // of cards to be cleared in MUT (or precleaned in card table).
7496       // The set of cards to be cleared is all those that overlap
7497       // with the interval [_threshold, _finger); note that
7498       // _threshold is always kept card-aligned but _finger isn't
7499       // always card-aligned.
7500       HeapWord* old_threshold = _threshold;
7501       assert(old_threshold == (HeapWord*)round_to(
7502               (intptr_t)old_threshold, CardTableModRefBS::card_size),
7503              "_threshold should always be card-aligned");
7504       _threshold = (HeapWord*)round_to(
7505                      (intptr_t)_finger, CardTableModRefBS::card_size);
7506       MemRegion mr(old_threshold, _threshold);
7507       assert(!mr.is_empty(), "Control point invariant");
7508       assert(_span.contains(mr), "Should clear within span");
7509       _mut->clear_range(mr);
7510     }
7511   DEBUG_ONLY(})
7512   // Note: the finger doesn't advance while we drain
7513   // the stack below.
7514   PushOrMarkClosure pushOrMarkClosure(_collector,
7515                                       _span, _bitMap, _markStack,
7516                                       _finger, this);
7517   bool res = _markStack->push(obj);
7518   assert(res, "Empty non-zero size stack should have space for single push");
7519   while (!_markStack->isEmpty()) {
7520     oop new_oop = _markStack->pop();
7521     // Skip verifying header mark word below because we are
7522     // running concurrent with mutators.
7523     assert(new_oop->is_oop(true), "Oops! expected to pop an oop");
7524     // now scan this oop's oops
7525     new_oop->oop_iterate(&pushOrMarkClosure);
7526     do_yield_check();
7527   }
7528   assert(_markStack->isEmpty(), "tautology, emphasizing post-condition");
7529 }
7530 
7531 Par_MarkFromRootsClosure::Par_MarkFromRootsClosure(CMSConcMarkingTask* task,
7532                        CMSCollector* collector, MemRegion span,
7533                        CMSBitMap* bit_map,
7534                        OopTaskQueue* work_queue,
7535                        CMSMarkStack*  overflow_stack,
7536                        bool should_yield):
7537   _collector(collector),
7538   _whole_span(collector->_span),
7539   _span(span),
7540   _bit_map(bit_map),
7541   _mut(&collector->_modUnionTable),
7542   _work_queue(work_queue),
7543   _overflow_stack(overflow_stack),
7544   _yield(should_yield),
7545   _skip_bits(0),
7546   _task(task)
7547 {
7548   assert(_work_queue->size() == 0, "work_queue should be empty");
7549   _finger = span.start();
7550   _threshold = _finger;     // XXX Defer clear-on-enter optimization for now
7551   assert(_span.contains(_finger), "Out of bounds _finger?");
7552 }
7553 
7554 // Should revisit to see if this should be restructured for
7555 // greater efficiency.
7556 bool Par_MarkFromRootsClosure::do_bit(size_t offset) {
7557   if (_skip_bits > 0) {
7558     _skip_bits--;
7559     return true;
7560   }
7561   // convert offset into a HeapWord*
7562   HeapWord* addr = _bit_map->startWord() + offset;
7563   assert(_bit_map->endWord() && addr < _bit_map->endWord(),
7564          "address out of range");
7565   assert(_bit_map->isMarked(addr), "tautology");
7566   if (_bit_map->isMarked(addr+1)) {
7567     // this is an allocated object that might not yet be initialized
7568     assert(_skip_bits == 0, "tautology");
7569     _skip_bits = 2;  // skip next two marked bits ("Printezis-marks")
7570     oop p = oop(addr);
7571     if (p->klass_or_null() == NULL) {
      // in the case of Clean-on-Enter optimization, redirty card
      // and avoid clearing card by increasing the threshold.
7574       return true;
7575     }
7576   }
7577   scan_oops_in_oop(addr);
7578   return true;
7579 }
7580 
7581 void Par_MarkFromRootsClosure::scan_oops_in_oop(HeapWord* ptr) {
7582   assert(_bit_map->isMarked(ptr), "expected bit to be set");
7583   // Should we assert that our work queue is empty or
7584   // below some drain limit?
7585   assert(_work_queue->size() == 0,
7586          "should drain stack to limit stack usage");
7587   // convert ptr to an oop preparatory to scanning
7588   oop obj = oop(ptr);
7589   // Ignore mark word in verification below, since we
7590   // may be running concurrent with mutators.
7591   assert(obj->is_oop(true), "should be an oop");
7592   assert(_finger <= ptr, "_finger runneth ahead");
7593   // advance the finger to right end of this object
7594   _finger = ptr + obj->size();
7595   assert(_finger > ptr, "we just incremented it above");
7596   // On large heaps, it may take us some time to get through
7597   // the marking phase (especially if running iCMS). During
7598   // this time it's possible that a lot of mutations have
7599   // accumulated in the card table and the mod union table --
7600   // these mutation records are redundant until we have
7601   // actually traced into the corresponding card.
7602   // Here, we check whether advancing the finger would make
7603   // us cross into a new card, and if so clear corresponding
7604   // cards in the MUT (preclean them in the card-table in the
7605   // future).
7606 
7607   // The clean-on-enter optimization is disabled by default,
7608   // until we fix 6178663.
7609   if (CMSCleanOnEnter && (_finger > _threshold)) {
7610     // [_threshold, _finger) represents the interval
    // of cards to be cleared in MUT (or precleaned in card table).
7612     // The set of cards to be cleared is all those that overlap
7613     // with the interval [_threshold, _finger); note that
7614     // _threshold is always kept card-aligned but _finger isn't
7615     // always card-aligned.
7616     HeapWord* old_threshold = _threshold;
7617     assert(old_threshold == (HeapWord*)round_to(
7618             (intptr_t)old_threshold, CardTableModRefBS::card_size),
7619            "_threshold should always be card-aligned");
7620     _threshold = (HeapWord*)round_to(
7621                    (intptr_t)_finger, CardTableModRefBS::card_size);
7622     MemRegion mr(old_threshold, _threshold);
7623     assert(!mr.is_empty(), "Control point invariant");
7624     assert(_span.contains(mr), "Should clear within span"); // _whole_span ??
7625     _mut->clear_range(mr);
7626   }
7627 
7628   // Note: the local finger doesn't advance while we drain
7629   // the stack below, but the global finger sure can and will.
7630   HeapWord** gfa = _task->global_finger_addr();
7631   Par_PushOrMarkClosure pushOrMarkClosure(_collector,
7632                                       _span, _bit_map,
7633                                       _work_queue,
7634                                       _overflow_stack,
7635                                       _finger,
7636                                       gfa, this);
7637   bool res = _work_queue->push(obj);   // overflow could occur here
7638   assert(res, "Will hold once we use workqueues");
7639   while (true) {
7640     oop new_oop;
7641     if (!_work_queue->pop_local(new_oop)) {
7642       // We emptied our work_queue; check if there's stuff that can
7643       // be gotten from the overflow stack.
7644       if (CMSConcMarkingTask::get_work_from_overflow_stack(
7645             _overflow_stack, _work_queue)) {
7646         do_yield_check();
7647         continue;
7648       } else {  // done
7649         break;
7650       }
7651     }
7652     // Skip verifying header mark word below because we are
7653     // running concurrent with mutators.
7654     assert(new_oop->is_oop(true), "Oops! expected to pop an oop");
7655     // now scan this oop's oops
7656     new_oop->oop_iterate(&pushOrMarkClosure);
7657     do_yield_check();
7658   }
7659   assert(_work_queue->size() == 0, "tautology, emphasizing post-condition");
7660 }
7661 
7662 // Yield in response to a request from VM Thread or
7663 // from mutators.
7664 void Par_MarkFromRootsClosure::do_yield_work() {
7665   assert(_task != NULL, "sanity");
7666   _task->yield();
7667 }
7668 
7669 // A variant of the above used for verifying CMS marking work.
7670 MarkFromRootsVerifyClosure::MarkFromRootsVerifyClosure(CMSCollector* collector,
7671                         MemRegion span,
7672                         CMSBitMap* verification_bm, CMSBitMap* cms_bm,
7673                         CMSMarkStack*  mark_stack):
7674   _collector(collector),
7675   _span(span),
7676   _verification_bm(verification_bm),
7677   _cms_bm(cms_bm),
7678   _mark_stack(mark_stack),
7679   _pam_verify_closure(collector, span, verification_bm, cms_bm,
7680                       mark_stack)
7681 {
7682   assert(_mark_stack->isEmpty(), "stack should be empty");
7683   _finger = _verification_bm->startWord();
7684   assert(_collector->_restart_addr == NULL, "Sanity check");
7685   assert(_span.contains(_finger), "Out of bounds _finger?");
7686 }
7687 
7688 void MarkFromRootsVerifyClosure::reset(HeapWord* addr) {
7689   assert(_mark_stack->isEmpty(), "would cause duplicates on stack");
7690   assert(_span.contains(addr), "Out of bounds _finger?");
7691   _finger = addr;
7692 }
7693 
7694 // Should revisit to see if this should be restructured for
7695 // greater efficiency.
7696 bool MarkFromRootsVerifyClosure::do_bit(size_t offset) {
7697   // convert offset into a HeapWord*
7698   HeapWord* addr = _verification_bm->startWord() + offset;
7699   assert(_verification_bm->endWord() && addr < _verification_bm->endWord(),
7700          "address out of range");
7701   assert(_verification_bm->isMarked(addr), "tautology");
7702   assert(_cms_bm->isMarked(addr), "tautology");
7703 
7704   assert(_mark_stack->isEmpty(),
7705          "should drain stack to limit stack usage");
7706   // convert addr to an oop preparatory to scanning
7707   oop obj = oop(addr);
7708   assert(obj->is_oop(), "should be an oop");
7709   assert(_finger <= addr, "_finger runneth ahead");
7710   // advance the finger to right end of this object
7711   _finger = addr + obj->size();
7712   assert(_finger > addr, "we just incremented it above");
7713   // Note: the finger doesn't advance while we drain
7714   // the stack below.
7715   bool res = _mark_stack->push(obj);
7716   assert(res, "Empty non-zero size stack should have space for single push");
7717   while (!_mark_stack->isEmpty()) {
7718     oop new_oop = _mark_stack->pop();
7719     assert(new_oop->is_oop(), "Oops! expected to pop an oop");
7720     // now scan this oop's oops
7721     new_oop->oop_iterate(&_pam_verify_closure);
7722   }
7723   assert(_mark_stack->isEmpty(), "tautology, emphasizing post-condition");
7724   return true;
7725 }
7726 
7727 PushAndMarkVerifyClosure::PushAndMarkVerifyClosure(
7728   CMSCollector* collector, MemRegion span,
7729   CMSBitMap* verification_bm, CMSBitMap* cms_bm,
7730   CMSMarkStack*  mark_stack):
7731   CMSOopClosure(collector->ref_processor()),
7732   _collector(collector),
7733   _span(span),
7734   _verification_bm(verification_bm),
7735   _cms_bm(cms_bm),
7736   _mark_stack(mark_stack)
7737 { }
7738 
7739 void PushAndMarkVerifyClosure::do_oop(oop* p)       { PushAndMarkVerifyClosure::do_oop_work(p); }
7740 void PushAndMarkVerifyClosure::do_oop(narrowOop* p) { PushAndMarkVerifyClosure::do_oop_work(p); }
7741 
7742 // Upon stack overflow, we discard (part of) the stack,
7743 // remembering the least address amongst those discarded
7744 // in CMSCollector's _restart_address.
7745 void PushAndMarkVerifyClosure::handle_stack_overflow(HeapWord* lost) {
7746   // Remember the least grey address discarded
7747   HeapWord* ra = (HeapWord*)_mark_stack->least_value(lost);
7748   _collector->lower_restart_addr(ra);
7749   _mark_stack->reset();  // discard stack contents
7750   _mark_stack->expand(); // expand the stack if possible
7751 }
7752 
7753 void PushAndMarkVerifyClosure::do_oop(oop obj) {
7754   assert(obj->is_oop_or_null(), "expected an oop or NULL");
7755   HeapWord* addr = (HeapWord*)obj;
7756   if (_span.contains(addr) && !_verification_bm->isMarked(addr)) {
7757     // Oop lies in _span and isn't yet grey or black
7758     _verification_bm->mark(addr);            // now grey
7759     if (!_cms_bm->isMarked(addr)) {
7760       oop(addr)->print();
7761       gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)",
7762                              addr);
7763       fatal("... aborting");
7764     }
7765 
7766     if (!_mark_stack->push(obj)) { // stack overflow
7767       if (PrintCMSStatistics != 0) {
7768         gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
7769                                SIZE_FORMAT, _mark_stack->capacity());
7770       }
7771       assert(_mark_stack->isFull(), "Else push should have succeeded");
7772       handle_stack_overflow(addr);
7773     }
7774     // anything including and to the right of _finger
7775     // will be scanned as we iterate over the remainder of the
7776     // bit map
7777   }
7778 }
7779 
7780 PushOrMarkClosure::PushOrMarkClosure(CMSCollector* collector,
7781                      MemRegion span,
7782                      CMSBitMap* bitMap, CMSMarkStack*  markStack,
7783                      HeapWord* finger, MarkFromRootsClosure* parent) :
7784   CMSOopClosure(collector->ref_processor()),
7785   _collector(collector),
7786   _span(span),
7787   _bitMap(bitMap),
7788   _markStack(markStack),
7789   _finger(finger),
7790   _parent(parent)
7791 { }
7792 
7793 Par_PushOrMarkClosure::Par_PushOrMarkClosure(CMSCollector* collector,
7794                      MemRegion span,
7795                      CMSBitMap* bit_map,
7796                      OopTaskQueue* work_queue,
7797                      CMSMarkStack*  overflow_stack,
7798                      HeapWord* finger,
7799                      HeapWord** global_finger_addr,
7800                      Par_MarkFromRootsClosure* parent) :
7801   CMSOopClosure(collector->ref_processor()),
7802   _collector(collector),
7803   _whole_span(collector->_span),
7804   _span(span),
7805   _bit_map(bit_map),
7806   _work_queue(work_queue),
7807   _overflow_stack(overflow_stack),
7808   _finger(finger),
7809   _global_finger_addr(global_finger_addr),
7810   _parent(parent)
7811 { }
7812 
7813 // Assumes thread-safe access by callers, who are
7814 // responsible for mutual exclusion.
7815 void CMSCollector::lower_restart_addr(HeapWord* low) {
7816   assert(_span.contains(low), "Out of bounds addr");
7817   if (_restart_addr == NULL) {
7818     _restart_addr = low;
7819   } else {
7820     _restart_addr = MIN2(_restart_addr, low);
7821   }
7822 }
7823 
7824 // Upon stack overflow, we discard (part of) the stack,
7825 // remembering the least address amongst those discarded
7826 // in CMSCollector's _restart_address.
7827 void PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
7828   // Remember the least grey address discarded
7829   HeapWord* ra = (HeapWord*)_markStack->least_value(lost);
7830   _collector->lower_restart_addr(ra);
7831   _markStack->reset();  // discard stack contents
7832   _markStack->expand(); // expand the stack if possible
7833 }
7834 
7835 // Upon stack overflow, we discard (part of) the stack,
7836 // remembering the least address amongst those discarded
7837 // in CMSCollector's _restart_address.
7838 void Par_PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
7839   // We need to do this under a mutex to prevent other
7840   // workers from interfering with the work done below.
7841   MutexLockerEx ml(_overflow_stack->par_lock(),
7842                    Mutex::_no_safepoint_check_flag);
7843   // Remember the least grey address discarded
7844   HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost);
7845   _collector->lower_restart_addr(ra);
7846   _overflow_stack->reset();  // discard stack contents
7847   _overflow_stack->expand(); // expand the stack if possible
7848 }
7849 
7850 void CMKlassClosure::do_klass(Klass* k) {
7851   assert(_oop_closure != NULL, "Not initialized?");
7852   k->oops_do(_oop_closure);
7853 }
7854 
7855 void PushOrMarkClosure::do_oop(oop obj) {
7856   // Ignore mark word because we are running concurrent with mutators.
7857   assert(obj->is_oop_or_null(true), "expected an oop or NULL");
7858   HeapWord* addr = (HeapWord*)obj;
7859   if (_span.contains(addr) && !_bitMap->isMarked(addr)) {
7860     // Oop lies in _span and isn't yet grey or black
7861     _bitMap->mark(addr);            // now grey
7862     if (addr < _finger) {
7863       // the bit map iteration has already either passed, or
7864       // sampled, this bit in the bit map; we'll need to
7865       // use the marking stack to scan this oop's oops.
7866       bool simulate_overflow = false;
7867       NOT_PRODUCT(
7868         if (CMSMarkStackOverflowALot &&
7869             _collector->simulate_overflow()) {
7870           // simulate a stack overflow
7871           simulate_overflow = true;
7872         }
7873       )
7874       if (simulate_overflow || !_markStack->push(obj)) { // stack overflow
7875         if (PrintCMSStatistics != 0) {
7876           gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
7877                                  SIZE_FORMAT, _markStack->capacity());
7878         }
7879         assert(simulate_overflow || _markStack->isFull(), "Else push should have succeeded");
7880         handle_stack_overflow(addr);
7881       }
7882     }
7883     // anything including and to the right of _finger
7884     // will be scanned as we iterate over the remainder of the
7885     // bit map
7886     do_yield_check();
7887   }
7888 }
7889 
7890 void PushOrMarkClosure::do_oop(oop* p)       { PushOrMarkClosure::do_oop_work(p); }
7891 void PushOrMarkClosure::do_oop(narrowOop* p) { PushOrMarkClosure::do_oop_work(p); }
7892 
7893 void Par_PushOrMarkClosure::do_oop(oop obj) {
7894   // Ignore mark word because we are running concurrent with mutators.
7895   assert(obj->is_oop_or_null(true), "expected an oop or NULL");
7896   HeapWord* addr = (HeapWord*)obj;
7897   if (_whole_span.contains(addr) && !_bit_map->isMarked(addr)) {
    // Oop lies in _whole_span and isn't yet grey or black
7899     // We read the global_finger (volatile read) strictly after marking oop
7900     bool res = _bit_map->par_mark(addr);    // now grey
7901     volatile HeapWord** gfa = (volatile HeapWord**)_global_finger_addr;
7902     // Should we push this marked oop on our stack?
7903     // -- if someone else marked it, nothing to do
7904     // -- if target oop is above global finger nothing to do
7905     // -- if target oop is in chunk and above local finger
7906     //      then nothing to do
7907     // -- else push on work queue
7908     if (   !res       // someone else marked it, they will deal with it
7909         || (addr >= *gfa)  // will be scanned in a later task
7910         || (_span.contains(addr) && addr >= _finger)) { // later in this chunk
7911       return;
7912     }
7913     // the bit map iteration has already either passed, or
7914     // sampled, this bit in the bit map; we'll need to
7915     // use the marking stack to scan this oop's oops.
7916     bool simulate_overflow = false;
7917     NOT_PRODUCT(
7918       if (CMSMarkStackOverflowALot &&
7919           _collector->simulate_overflow()) {
7920         // simulate a stack overflow
7921         simulate_overflow = true;
7922       }
7923     )
7924     if (simulate_overflow ||
7925         !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
7926       // stack overflow
7927       if (PrintCMSStatistics != 0) {
7928         gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
7929                                SIZE_FORMAT, _overflow_stack->capacity());
7930       }
7931       // We cannot assert that the overflow stack is full because
7932       // it may have been emptied since.
7933       assert(simulate_overflow ||
7934              _work_queue->size() == _work_queue->max_elems(),
7935             "Else push should have succeeded");
7936       handle_stack_overflow(addr);
7937     }
7938     do_yield_check();
7939   }
7940 }
7941 
7942 void Par_PushOrMarkClosure::do_oop(oop* p)       { Par_PushOrMarkClosure::do_oop_work(p); }
7943 void Par_PushOrMarkClosure::do_oop(narrowOop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
7944 
7945 PushAndMarkClosure::PushAndMarkClosure(CMSCollector* collector,
7946                                        MemRegion span,
7947                                        ReferenceProcessor* rp,
7948                                        CMSBitMap* bit_map,
7949                                        CMSBitMap* mod_union_table,
7950                                        CMSMarkStack*  mark_stack,
7951                                        bool           concurrent_precleaning):
7952   CMSOopClosure(rp),
7953   _collector(collector),
7954   _span(span),
7955   _bit_map(bit_map),
7956   _mod_union_table(mod_union_table),
7957   _mark_stack(mark_stack),
7958   _concurrent_precleaning(concurrent_precleaning)
7959 {
7960   assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
7961 }
7962 
7963 // Grey object rescan during pre-cleaning and second checkpoint phases --
7964 // the non-parallel version (the parallel version appears further below.)
7965 void PushAndMarkClosure::do_oop(oop obj) {
7966   // Ignore mark word verification. If during concurrent precleaning,
7967   // the object monitor may be locked. If during the checkpoint
  // phases, the object may already have been reached by a different
7969   // path and may be at the end of the global overflow list (so
7970   // the mark word may be NULL).
7971   assert(obj->is_oop_or_null(true /* ignore mark word */),
7972          "expected an oop or NULL");
7973   HeapWord* addr = (HeapWord*)obj;
7974   // Check if oop points into the CMS generation
7975   // and is not marked
7976   if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
7977     // a white object ...
7978     _bit_map->mark(addr);         // ... now grey
7979     // push on the marking stack (grey set)
7980     bool simulate_overflow = false;
7981     NOT_PRODUCT(
7982       if (CMSMarkStackOverflowALot &&
7983           _collector->simulate_overflow()) {
7984         // simulate a stack overflow
7985         simulate_overflow = true;
7986       }
7987     )
7988     if (simulate_overflow || !_mark_stack->push(obj)) {
7989       if (_concurrent_precleaning) {
         // During precleaning we can just dirty the appropriate card(s)
         // in the mod union table, thus ensuring that the object remains
         // in the grey set, and continue. In the case of object arrays
         // we need to dirty all of the cards that the object spans,
         // since the rescan of object arrays will be limited to the
         // dirty cards.
         // Note that no one can be interfering with us in this action
         // of dirtying the mod union table, so no locking or atomics
         // are required.
7999          if (obj->is_objArray()) {
8000            size_t sz = obj->size();
8001            HeapWord* end_card_addr = (HeapWord*)round_to(
8002                                         (intptr_t)(addr+sz), CardTableModRefBS::card_size);
8003            MemRegion redirty_range = MemRegion(addr, end_card_addr);
8004            assert(!redirty_range.is_empty(), "Arithmetical tautology");
8005            _mod_union_table->mark_range(redirty_range);
8006          } else {
8007            _mod_union_table->mark(addr);
8008          }
8009          _collector->_ser_pmc_preclean_ovflw++;
8010       } else {
8011          // During the remark phase, we need to remember this oop
8012          // in the overflow list.
8013          _collector->push_on_overflow_list(obj);
8014          _collector->_ser_pmc_remark_ovflw++;
8015       }
8016     }
8017   }
8018 }
8019 
8020 Par_PushAndMarkClosure::Par_PushAndMarkClosure(CMSCollector* collector,
8021                                                MemRegion span,
8022                                                ReferenceProcessor* rp,
8023                                                CMSBitMap* bit_map,
8024                                                OopTaskQueue* work_queue):
8025   CMSOopClosure(rp),
8026   _collector(collector),
8027   _span(span),
8028   _bit_map(bit_map),
8029   _work_queue(work_queue)
8030 {
8031   assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
8032 }
8033 
8034 void PushAndMarkClosure::do_oop(oop* p)       { PushAndMarkClosure::do_oop_work(p); }
8035 void PushAndMarkClosure::do_oop(narrowOop* p) { PushAndMarkClosure::do_oop_work(p); }
8036 
8037 // Grey object rescan during second checkpoint phase --
8038 // the parallel version.
8039 void Par_PushAndMarkClosure::do_oop(oop obj) {
8040   // In the assert below, we ignore the mark word because
8041   // this oop may point to an already visited object that is
8042   // on the overflow stack (in which case the mark word has
8043   // been hijacked for chaining into the overflow stack --
8044   // if this is the last object in the overflow stack then
8045   // its mark word will be NULL). Because this object may
8046   // have been subsequently popped off the global overflow
8047   // stack, and the mark word possibly restored to the prototypical
  // value, by the time we come to examine this failing assert in
  // the debugger, is_oop_or_null(false) may already have started
  // to hold.
8051   assert(obj->is_oop_or_null(true),
8052          "expected an oop or NULL");
8053   HeapWord* addr = (HeapWord*)obj;
8054   // Check if oop points into the CMS generation
8055   // and is not marked
8056   if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
8057     // a white object ...
8058     // If we manage to "claim" the object, by being the
8059     // first thread to mark it, then we push it on our
8060     // marking stack
8061     if (_bit_map->par_mark(addr)) {     // ... now grey
8062       // push on work queue (grey set)
8063       bool simulate_overflow = false;
8064       NOT_PRODUCT(
8065         if (CMSMarkStackOverflowALot &&
8066             _collector->par_simulate_overflow()) {
8067           // simulate a stack overflow
8068           simulate_overflow = true;
8069         }
8070       )
8071       if (simulate_overflow || !_work_queue->push(obj)) {
8072         _collector->par_push_on_overflow_list(obj);
8073         _collector->_par_pmc_remark_ovflw++; //  imprecise OK: no need to CAS
8074       }
8075     } // Else, some other thread got there first
8076   }
8077 }
8078 
8079 void Par_PushAndMarkClosure::do_oop(oop* p)       { Par_PushAndMarkClosure::do_oop_work(p); }
8080 void Par_PushAndMarkClosure::do_oop(narrowOop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
8081 
8082 void CMSPrecleanRefsYieldClosure::do_yield_work() {
8083   Mutex* bml = _collector->bitMapLock();
8084   assert_lock_strong(bml);
8085   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
8086          "CMS thread should hold CMS token");
8087 
8088   bml->unlock();
8089   ConcurrentMarkSweepThread::desynchronize(true);
8090 
8091   ConcurrentMarkSweepThread::acknowledge_yield_request();
8092 
8093   _collector->stopTimer();
8094   GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
8095   if (PrintCMSStatistics != 0) {
8096     _collector->incrementYields();
8097   }
8098   _collector->icms_wait();
8099 
8100   // See the comment in coordinator_yield()
8101   for (unsigned i = 0; i < CMSYieldSleepCount &&
8102                        ConcurrentMarkSweepThread::should_yield() &&
8103                        !CMSCollector::foregroundGCIsActive(); ++i) {
8104     os::sleep(Thread::current(), 1, false);
8105     ConcurrentMarkSweepThread::acknowledge_yield_request();
8106   }
8107 
8108   ConcurrentMarkSweepThread::synchronize(true);
8109   bml->lock();
8110 
8111   _collector->startTimer();
8112 }
8113 
8114 bool CMSPrecleanRefsYieldClosure::should_return() {
8115   if (ConcurrentMarkSweepThread::should_yield()) {
8116     do_yield_work();
8117   }
8118   return _collector->foregroundGCIsActive();
8119 }
8120 
8121 void MarkFromDirtyCardsClosure::do_MemRegion(MemRegion mr) {
8122   assert(((size_t)mr.start())%CardTableModRefBS::card_size_in_words == 0,
8123          "mr should be aligned to start at a card boundary");
8124   // We'd like to assert:
8125   // assert(mr.word_size()%CardTableModRefBS::card_size_in_words == 0,
8126   //        "mr should be a range of cards");
8127   // However, that would be too strong in one case -- the last
8128   // partition ends at _unallocated_block which, in general, can be
8129   // an arbitrary boundary, not necessarily card aligned.
8130   if (PrintCMSStatistics != 0) {
8131     _num_dirty_cards +=
8132          mr.word_size()/CardTableModRefBS::card_size_in_words;
8133   }
8134   _space->object_iterate_mem(mr, &_scan_cl);
8135 }
8136 
8137 SweepClosure::SweepClosure(CMSCollector* collector,
8138                            ConcurrentMarkSweepGeneration* g,
8139                            CMSBitMap* bitMap, bool should_yield) :
8140   _collector(collector),
8141   _g(g),
8142   _sp(g->cmsSpace()),
8143   _limit(_sp->sweep_limit()),
8144   _freelistLock(_sp->freelistLock()),
8145   _bitMap(bitMap),
8146   _yield(should_yield),
8147   _inFreeRange(false),           // No free range at beginning of sweep
8148   _freeRangeInFreeLists(false),  // No free range at beginning of sweep
8149   _lastFreeRangeCoalesced(false),
8150   _freeFinger(g->used_region().start())
8151 {
8152   NOT_PRODUCT(
8153     _numObjectsFreed = 0;
8154     _numWordsFreed   = 0;
8155     _numObjectsLive = 0;
8156     _numWordsLive = 0;
8157     _numObjectsAlreadyFree = 0;
8158     _numWordsAlreadyFree = 0;
8159     _last_fc = NULL;
8160 
8161     _sp->initializeIndexedFreeListArrayReturnedBytes();
8162     _sp->dictionary()->initialize_dict_returned_bytes();
8163   )
8164   assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
8165          "sweep _limit out of bounds");
8166   if (CMSTraceSweeper) {
8167     gclog_or_tty->print_cr("\n====================\nStarting new sweep with limit " PTR_FORMAT,
8168                         _limit);
8169   }
8170 }
8171 
8172 void SweepClosure::print_on(outputStream* st) const {
8173   tty->print_cr("_sp = [" PTR_FORMAT "," PTR_FORMAT ")",
8174                 _sp->bottom(), _sp->end());
8175   tty->print_cr("_limit = " PTR_FORMAT, _limit);
8176   tty->print_cr("_freeFinger = " PTR_FORMAT, _freeFinger);
8177   NOT_PRODUCT(tty->print_cr("_last_fc = " PTR_FORMAT, _last_fc);)
8178   tty->print_cr("_inFreeRange = %d, _freeRangeInFreeLists = %d, _lastFreeRangeCoalesced = %d",
8179                 _inFreeRange, _freeRangeInFreeLists, _lastFreeRangeCoalesced);
8180 }
8181 
8182 #ifndef PRODUCT
8183 // Assertion checking only:  no useful work in product mode --
8184 // however, if any of the flags below become product flags,
8185 // you may need to review this code to see if it needs to be
8186 // enabled in product mode.
8187 SweepClosure::~SweepClosure() {
8188   assert_lock_strong(_freelistLock);
8189   assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
8190          "sweep _limit out of bounds");
8191   if (inFreeRange()) {
8192     warning("inFreeRange() should have been reset; dumping state of SweepClosure");
8193     print();
8194     ShouldNotReachHere();
8195   }
8196   if (Verbose && PrintGC) {
8197     gclog_or_tty->print("Collected "SIZE_FORMAT" objects, " SIZE_FORMAT " bytes",
8198                         _numObjectsFreed, _numWordsFreed*sizeof(HeapWord));
8199     gclog_or_tty->print_cr("\nLive "SIZE_FORMAT" objects,  "
8200                            SIZE_FORMAT" bytes  "
8201       "Already free "SIZE_FORMAT" objects, "SIZE_FORMAT" bytes",
8202       _numObjectsLive, _numWordsLive*sizeof(HeapWord),
8203       _numObjectsAlreadyFree, _numWordsAlreadyFree*sizeof(HeapWord));
8204     size_t totalBytes = (_numWordsFreed + _numWordsLive + _numWordsAlreadyFree)
8205                         * sizeof(HeapWord);
8206     gclog_or_tty->print_cr("Total sweep: "SIZE_FORMAT" bytes", totalBytes);
8207 
8208     if (PrintCMSStatistics && CMSVerifyReturnedBytes) {
8209       size_t indexListReturnedBytes = _sp->sumIndexedFreeListArrayReturnedBytes();
8210       size_t dict_returned_bytes = _sp->dictionary()->sum_dict_returned_bytes();
8211       size_t returned_bytes = indexListReturnedBytes + dict_returned_bytes;
8212       gclog_or_tty->print("Returned "SIZE_FORMAT" bytes", returned_bytes);
8213       gclog_or_tty->print("   Indexed List Returned "SIZE_FORMAT" bytes",
8214         indexListReturnedBytes);
8215       gclog_or_tty->print_cr("        Dictionary Returned "SIZE_FORMAT" bytes",
8216         dict_returned_bytes);
8217     }
8218   }
8219   if (CMSTraceSweeper) {
8220     gclog_or_tty->print_cr("end of sweep with _limit = " PTR_FORMAT "\n================",
8221                            _limit);
8222   }
8223 }
8224 #endif  // PRODUCT
8225 
8226 void SweepClosure::initialize_free_range(HeapWord* freeFinger,
8227     bool freeRangeInFreeLists) {
8228   if (CMSTraceSweeper) {
8229     gclog_or_tty->print("---- Start free range at 0x%x with free block (%d)\n",
8230                freeFinger, freeRangeInFreeLists);
8231   }
8232   assert(!inFreeRange(), "Trampling existing free range");
8233   set_inFreeRange(true);
8234   set_lastFreeRangeCoalesced(false);
8235 
8236   set_freeFinger(freeFinger);
8237   set_freeRangeInFreeLists(freeRangeInFreeLists);
8238   if (CMSTestInFreeList) {
8239     if (freeRangeInFreeLists) {
8240       FreeChunk* fc = (FreeChunk*) freeFinger;
8241       assert(fc->is_free(), "A chunk on the free list should be free.");
8242       assert(fc->size() > 0, "Free range should have a size");
8243       assert(_sp->verify_chunk_in_free_list(fc), "Chunk is not in free lists");
8244     }
8245   }
8246 }
8247 
8248 // Note that the sweeper runs concurrently with mutators. Thus,
8249 // it is possible for direct allocation in this generation to happen
8250 // in the middle of the sweep. Note that the sweeper also coalesces
8251 // contiguous free blocks. Thus, unless the sweeper and the allocator
8252 // synchronize appropriately freshly allocated blocks may get swept up.
8253 // This is accomplished by the sweeper locking the free lists while
8254 // it is sweeping. Thus blocks that are determined to be free are
8255 // indeed free. There is however one additional complication:
8256 // blocks that have been allocated since the final checkpoint and
8257 // mark, will not have been marked and so would be treated as
8258 // unreachable and swept up. To prevent this, the allocator marks
8259 // the bit map when allocating during the sweep phase. This leads,
8260 // however, to a further complication -- objects may have been allocated
8261 // but not yet initialized -- in the sense that the header isn't yet
8262 // installed. The sweeper can not then determine the size of the block
8263 // in order to skip over it. To deal with this case, we use a technique
8264 // (due to Printezis) to encode such uninitialized block sizes in the
8265 // bit map. Since the bit map uses a bit per every HeapWord, but the
8266 // CMS generation has a minimum object size of 3 HeapWords, it follows
8267 // that "normal marks" won't be adjacent in the bit map (there will
8268 // always be at least two 0 bits between successive 1 bits). We make use
8269 // of these "unused" bits to represent uninitialized blocks -- the bit
8270 // corresponding to the start of the uninitialized object and the next
8271 // bit are both set. Finally, a 1 bit marks the end of the object that
8272 // started with the two consecutive 1 bits to indicate its potentially
8273 // uninitialized state.
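//
// As a worked example of this encoding (a sketch, with word positions
// relative to the block start): an uninitialized block of 5 words
// starting at addr appears in the bit map as
//
//   word:   addr  addr+1  addr+2  addr+3  addr+4
//   bit:     1      1       0       0       1
//
// and the sweeper (see do_live_chunk() below) recovers the size as
//   nextOneAddr = getNextMarkedWordAddress(addr + 2);   // addr + 4
//   size        = pointer_delta(nextOneAddr + 1, addr); // == 5 words
// The 3-word minimum object size guarantees that the adjacent pair
// of 1 bits cannot be the "normal marks" of two distinct objects.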
8274 
8275 size_t SweepClosure::do_blk_careful(HeapWord* addr) {
8276   FreeChunk* fc = (FreeChunk*)addr;
8277   size_t res;
8278 
8279   // Check if we are done sweeping. Below we check "addr >= _limit" rather
8280   // than "addr == _limit" because although _limit was a block boundary when
8281   // we started the sweep, it may no longer be one because heap expansion
8282   // may have caused us to coalesce the block ending at the address _limit
8283   // with a newly expanded chunk (this happens when _limit was set to the
8284   // previous _end of the space), so we may have stepped past _limit:
8285   // see the following Zeno-like trail of CRs 6977970, 7008136, 7042740.
8286   if (addr >= _limit) { // we have swept up to or past the limit: finish up
8287     assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
8288            "sweep _limit out of bounds");
8289     assert(addr < _sp->end(), "addr out of bounds");
8290     // Flush any free range we might be holding as a single
8291     // coalesced chunk to the appropriate free list.
8292     if (inFreeRange()) {
8293       assert(freeFinger() >= _sp->bottom() && freeFinger() < _limit,
8294              err_msg("freeFinger() " PTR_FORMAT" is out-of-bounds", freeFinger()));
8295       flush_cur_free_chunk(freeFinger(),
8296                            pointer_delta(addr, freeFinger()));
8297       if (CMSTraceSweeper) {
8298         gclog_or_tty->print("Sweep: last chunk: ");
8299         gclog_or_tty->print("put_free_blk 0x%x ("SIZE_FORMAT") "
8300                    "[coalesced:"SIZE_FORMAT"]\n",
8301                    freeFinger(), pointer_delta(addr, freeFinger()),
8302                    lastFreeRangeCoalesced());
8303       }
8304     }
8305 
8306     // help the iterator loop finish
8307     return pointer_delta(_sp->end(), addr);
8308   }
8309 
8310   assert(addr < _limit, "sweep invariant");
8311   // check if we should yield
8312   do_yield_check(addr);
8313   if (fc->is_free()) {
8314     // Chunk that is already free
8315     res = fc->size();
8316     do_already_free_chunk(fc);
8317     debug_only(_sp->verifyFreeLists());
8318     // If we flush the chunk at hand in lookahead_and_flush()
8319     // and it's coalesced with a preceding chunk, then the
8320     // process of "mangling" the payload of the coalesced block
8321     // will cause erasure of the size information from the
8322     // (erstwhile) header of all the coalesced blocks but the
8323     // first, so the first disjunct in the assert will not hold
8324     // in that specific case (in which case the second disjunct
8325     // will hold).
8326     assert(res == fc->size() || ((HeapWord*)fc) + res >= _limit,
8327            "Otherwise the size info doesn't change at this step");
8328     NOT_PRODUCT(
8329       _numObjectsAlreadyFree++;
8330       _numWordsAlreadyFree += res;
8331     )
8332     NOT_PRODUCT(_last_fc = fc;)
8333   } else if (!_bitMap->isMarked(addr)) {
8334     // Chunk is fresh garbage
8335     res = do_garbage_chunk(fc);
8336     debug_only(_sp->verifyFreeLists());
8337     NOT_PRODUCT(
8338       _numObjectsFreed++;
8339       _numWordsFreed += res;
8340     )
8341   } else {
8342     // Chunk that is alive.
8343     res = do_live_chunk(fc);
8344     debug_only(_sp->verifyFreeLists());
8345     NOT_PRODUCT(
8346         _numObjectsLive++;
8347         _numWordsLive += res;
8348     )
8349   }
8350   return res;
8351 }
8352 
// For the smart allocation, record the following
//  split deaths - a free chunk is removed from its free list because
//      it is being split into two or more chunks.
//  split birth - a free chunk is being added to its free list because
//      a larger free chunk has been split and resulted in this free chunk.
//  coal death - a free chunk is being removed from its free list because
//      it is being coalesced into a larger free chunk.
//  coal birth - a free chunk is being added to its free list because
//      it was created when two or more free chunks were coalesced into
//      this free chunk.
8363 //
8364 // These statistics are used to determine the desired number of free
8365 // chunks of a given size.  The desired number is chosen to be relative
8366 // to the end of a CMS sweep.  The desired number at the end of a sweep
8367 // is the
8368 //      count-at-end-of-previous-sweep (an amount that was enough)
8369 //              - count-at-beginning-of-current-sweep  (the excess)
8370 //              + split-births  (gains in this size during interval)
8371 //              - split-deaths  (demands on this size during interval)
8372 // where the interval is from the end of one sweep to the end of the
8373 // next.
8374 //
8375 // When sweeping the sweeper maintains an accumulated chunk which is
8376 // the chunk that is made up of chunks that have been coalesced.  That
8377 // will be termed the left-hand chunk.  A new chunk of garbage that
8378 // is being considered for coalescing will be referred to as the
8379 // right-hand chunk.
8380 //
8381 // When making a decision on whether to coalesce a right-hand chunk with
8382 // the current left-hand chunk, the current count vs. the desired count
8383 // of the left-hand chunk is considered.  Also if the right-hand chunk
8384 // is near the large chunk at the end of the heap (see
8385 // ConcurrentMarkSweepGeneration::isNearLargestChunk()), then the
8386 // left-hand chunk is coalesced.
8387 //
8388 // When making a decision about whether to split a chunk, the desired count
8389 // vs. the current count of the candidate to be split is also considered.
8390 // If the candidate is underpopulated (currently fewer chunks than desired)
8391 // a chunk of an overpopulated (currently more chunks than desired) size may
8392 // be chosen.  The "hint" associated with a free list, if non-null, points
8393 // to a free list which may be overpopulated.
8394 //
8395 
8396 void SweepClosure::do_already_free_chunk(FreeChunk* fc) {
8397   const size_t size = fc->size();
8398   // Chunks that cannot be coalesced are not in the
8399   // free lists.
8400   if (CMSTestInFreeList && !fc->cantCoalesce()) {
8401     assert(_sp->verify_chunk_in_free_list(fc),
8402       "free chunk should be in free lists");
8403   }
8404   // a chunk that is already free, should not have been
8405   // marked in the bit map
8406   HeapWord* const addr = (HeapWord*) fc;
8407   assert(!_bitMap->isMarked(addr), "free chunk should be unmarked");
8408   // Verify that the bit map has no bits marked between
8409   // addr and purported end of this block.
8410   _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
8411 
8412   // Some chunks cannot be coalesced under any circumstances.
8413   // See the definition of cantCoalesce().
8414   if (!fc->cantCoalesce()) {
8415     // This chunk can potentially be coalesced.
8416     if (_sp->adaptive_freelists()) {
8417       // All the work is done in
8418       do_post_free_or_garbage_chunk(fc, size);
8419     } else {  // Not adaptive free lists
8420       // this is a free chunk that can potentially be coalesced by the sweeper;
8421       if (!inFreeRange()) {
8422         // if the next chunk is a free block that can't be coalesced
8423         // it doesn't make sense to remove this chunk from the free lists
8424         FreeChunk* nextChunk = (FreeChunk*)(addr + size);
8425         assert((HeapWord*)nextChunk <= _sp->end(), "Chunk size out of bounds?");
8426         if ((HeapWord*)nextChunk < _sp->end() &&     // There is another free chunk to the right ...
8427             nextChunk->is_free()               &&     // ... which is free...
8428             nextChunk->cantCoalesce()) {             // ... but can't be coalesced
8429           // nothing to do
8430         } else {
8431           // Potentially the start of a new free range:
8432           // Don't eagerly remove it from the free lists.
8433           // No need to remove it if it will just be put
8434           // back again.  (Also from a pragmatic point of view
8435           // if it is a free block in a region that is beyond
8436           // any allocated blocks, an assertion will fail)
8437           // Remember the start of a free run.
8438           initialize_free_range(addr, true);
8439           // end - can coalesce with next chunk
8440         }
8441       } else {
8442         // the midst of a free range, we are coalescing
8443         print_free_block_coalesced(fc);
8444         if (CMSTraceSweeper) {
8445           gclog_or_tty->print("  -- pick up free block 0x%x (%d)\n", fc, size);
8446         }
8447         // remove it from the free lists
8448         _sp->removeFreeChunkFromFreeLists(fc);
8449         set_lastFreeRangeCoalesced(true);
8450         // If the chunk is being coalesced and the current free range is
8451         // in the free lists, remove the current free range so that it
8452         // will be returned to the free lists in its entirety - all
8453         // the coalesced pieces included.
8454         if (freeRangeInFreeLists()) {
8455           FreeChunk* ffc = (FreeChunk*) freeFinger();
8456           assert(ffc->size() == pointer_delta(addr, freeFinger()),
8457             "Size of free range is inconsistent with chunk size.");
8458           if (CMSTestInFreeList) {
8459             assert(_sp->verify_chunk_in_free_list(ffc),
8460               "free range is not in free lists");
8461           }
8462           _sp->removeFreeChunkFromFreeLists(ffc);
8463           set_freeRangeInFreeLists(false);
8464         }
8465       }
8466     }
8467     // Note that if the chunk is not coalescable (the else arm
8468     // below), we unconditionally flush, without needing to do
8469     // a "lookahead," as we do below.
8470     if (inFreeRange()) lookahead_and_flush(fc, size);
8471   } else {
8472     // Code path common to both original and adaptive free lists.
8473 
8474     // cant coalesce with previous block; this should be treated
8475     // as the end of a free run if any
8476     if (inFreeRange()) {
8477       // we kicked some butt; time to pick up the garbage
8478       assert(freeFinger() < addr, "freeFinger points too high");
8479       flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
8480     }
8481     // else, nothing to do, just continue
8482   }
8483 }
8484 
8485 size_t SweepClosure::do_garbage_chunk(FreeChunk* fc) {
8486   // This is a chunk of garbage.  It is not in any free list.
8487   // Add it to a free list or let it possibly be coalesced into
8488   // a larger chunk.
8489   HeapWord* const addr = (HeapWord*) fc;
8490   const size_t size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
8491 
8492   if (_sp->adaptive_freelists()) {
8493     // Verify that the bit map has no bits marked between
8494     // addr and purported end of just dead object.
8495     _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
8496 
8497     do_post_free_or_garbage_chunk(fc, size);
8498   } else {
8499     if (!inFreeRange()) {
8500       // start of a new free range
8501       assert(size > 0, "A free range should have a size");
8502       initialize_free_range(addr, false);
8503     } else {
8504       // this will be swept up when we hit the end of the
8505       // free range
8506       if (CMSTraceSweeper) {
8507         gclog_or_tty->print("  -- pick up garbage 0x%x (%d) \n", fc, size);
8508       }
8509       // If the chunk is being coalesced and the current free range is
8510       // in the free lists, remove the current free range so that it
8511       // will be returned to the free lists in its entirety - all
8512       // the coalesced pieces included.
8513       if (freeRangeInFreeLists()) {
8514         FreeChunk* ffc = (FreeChunk*)freeFinger();
8515         assert(ffc->size() == pointer_delta(addr, freeFinger()),
8516           "Size of free range is inconsistent with chunk size.");
8517         if (CMSTestInFreeList) {
8518           assert(_sp->verify_chunk_in_free_list(ffc),
8519             "free range is not in free lists");
8520         }
8521         _sp->removeFreeChunkFromFreeLists(ffc);
8522         set_freeRangeInFreeLists(false);
8523       }
8524       set_lastFreeRangeCoalesced(true);
8525     }
8526     // this will be swept up when we hit the end of the free range
8527 
8528     // Verify that the bit map has no bits marked between
8529     // addr and purported end of just dead object.
8530     _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
8531   }
8532   assert(_limit >= addr + size,
8533          "A freshly garbage chunk can't possibly straddle over _limit");
8534   if (inFreeRange()) lookahead_and_flush(fc, size);
8535   return size;
8536 }
8537 
8538 size_t SweepClosure::do_live_chunk(FreeChunk* fc) {
8539   HeapWord* addr = (HeapWord*) fc;
8540   // The sweeper has just found a live object. Return any accumulated
8541   // left hand chunk to the free lists.
8542   if (inFreeRange()) {
8543     assert(freeFinger() < addr, "freeFinger points too high");
8544     flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
8545   }
8546 
8547   // This object is live: we'd normally expect this to be
  // an oop, and would like to assert the following:
8549   // assert(oop(addr)->is_oop(), "live block should be an oop");
8550   // However, as we commented above, this may be an object whose
8551   // header hasn't yet been initialized.
8552   size_t size;
8553   assert(_bitMap->isMarked(addr), "Tautology for this control point");
8554   if (_bitMap->isMarked(addr + 1)) {
8555     // Determine the size from the bit map, rather than trying to
8556     // compute it from the object header.
8557     HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
8558     size = pointer_delta(nextOneAddr + 1, addr);
8559     assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
8560            "alignment problem");
8561 
8562 #ifdef ASSERT
8563       if (oop(addr)->klass_or_null() != NULL) {
8564         // Ignore mark word because we are running concurrent with mutators
8565         assert(oop(addr)->is_oop(true), "live block should be an oop");
8566         assert(size ==
8567                CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size()),
8568                "P-mark and computed size do not agree");
8569       }
8570 #endif
8571 
8572   } else {
8573     // This should be an initialized object that's alive.
8574     assert(oop(addr)->klass_or_null() != NULL,
8575            "Should be an initialized object");
8576     // Ignore mark word because we are running concurrent with mutators
8577     assert(oop(addr)->is_oop(true), "live block should be an oop");
8578     // Verify that the bit map has no bits marked between
8579     // addr and purported end of this block.
8580     size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
8581     assert(size >= 3, "Necessary for Printezis marks to work");
8582     assert(!_bitMap->isMarked(addr+1), "Tautology for this control point");
8583     DEBUG_ONLY(_bitMap->verifyNoOneBitsInRange(addr+2, addr+size);)
8584   }
8585   return size;
8586 }
8587 
8588 void SweepClosure::do_post_free_or_garbage_chunk(FreeChunk* fc,
8589                                                  size_t chunkSize) {
8590   // do_post_free_or_garbage_chunk() should only be called in the case
8591   // of the adaptive free list allocator.
8592   const bool fcInFreeLists = fc->is_free();
8593   assert(_sp->adaptive_freelists(), "Should only be used in this case.");
8594   assert((HeapWord*)fc <= _limit, "sweep invariant");
8595   if (CMSTestInFreeList && fcInFreeLists) {
8596     assert(_sp->verify_chunk_in_free_list(fc), "free chunk is not in free lists");
8597   }
8598 
8599   if (CMSTraceSweeper) {
8600     gclog_or_tty->print_cr("  -- pick up another chunk at 0x%x (%d)", fc, chunkSize);
8601   }
8602 
8603   HeapWord* const fc_addr = (HeapWord*) fc;
8604 
8605   bool coalesce;
8606   const size_t left  = pointer_delta(fc_addr, freeFinger());
8607   const size_t right = chunkSize;
8608   switch (FLSCoalescePolicy) {
    // numeric value forms a coalescing aggressiveness metric
8610     case 0:  { // never coalesce
8611       coalesce = false;
8612       break;
8613     }
8614     case 1: { // coalesce if left & right chunks on overpopulated lists
8615       coalesce = _sp->coalOverPopulated(left) &&
8616                  _sp->coalOverPopulated(right);
8617       break;
8618     }
8619     case 2: { // coalesce if left chunk on overpopulated list (default)
8620       coalesce = _sp->coalOverPopulated(left);
8621       break;
8622     }
8623     case 3: { // coalesce if left OR right chunk on overpopulated list
8624       coalesce = _sp->coalOverPopulated(left) ||
8625                  _sp->coalOverPopulated(right);
8626       break;
8627     }
8628     case 4: { // always coalesce
8629       coalesce = true;
8630       break;
8631     }
8632     default:
8633      ShouldNotReachHere();
8634   }
8635 
8636   // Should the current free range be coalesced?
8637   // If the chunk is in a free range and either we decided to coalesce above
8638   // or the chunk is near the large block at the end of the heap
8639   // (isNearLargestChunk() returns true), then coalesce this chunk.
8640   const bool doCoalesce = inFreeRange()
8641                           && (coalesce || _g->isNearLargestChunk(fc_addr));
8642   if (doCoalesce) {
8643     // Coalesce the current free range on the left with the new
8644     // chunk on the right.  If either is on a free list,
8645     // it must be removed from the list and stashed in the closure.
8646     if (freeRangeInFreeLists()) {
8647       FreeChunk* const ffc = (FreeChunk*)freeFinger();
8648       assert(ffc->size() == pointer_delta(fc_addr, freeFinger()),
8649         "Size of free range is inconsistent with chunk size.");
8650       if (CMSTestInFreeList) {
8651         assert(_sp->verify_chunk_in_free_list(ffc),
8652           "Chunk is not in free lists");
8653       }
8654       _sp->coalDeath(ffc->size());
8655       _sp->removeFreeChunkFromFreeLists(ffc);
8656       set_freeRangeInFreeLists(false);
8657     }
8658     if (fcInFreeLists) {
8659       _sp->coalDeath(chunkSize);
8660       assert(fc->size() == chunkSize,
8661         "The chunk has the wrong size or is not in the free lists");
8662       _sp->removeFreeChunkFromFreeLists(fc);
8663     }
8664     set_lastFreeRangeCoalesced(true);
8665     print_free_block_coalesced(fc);
8666   } else {  // not in a free range and/or should not coalesce
8667     // Return the current free range and start a new one.
8668     if (inFreeRange()) {
8669       // In a free range but cannot coalesce with the right hand chunk.
8670       // Put the current free range into the free lists.
8671       flush_cur_free_chunk(freeFinger(),
8672                            pointer_delta(fc_addr, freeFinger()));
8673     }
8674     // Set up for new free range.  Pass along whether the right hand
8675     // chunk is in the free lists.
8676     initialize_free_range((HeapWord*)fc, fcInFreeLists);
8677   }
8678 }
8679 
8680 // Lookahead flush:
8681 // If we are tracking a free range, and this is the last chunk that
8682 // we'll look at because its end crosses past _limit, we'll preemptively
8683 // flush it along with any free range we may be holding on to. Note that
8684 // this can be the case only for an already free or freshly garbage
8685 // chunk. If this block is an object, it can never straddle
8686 // over _limit. The "straddling" occurs when _limit is set at
8687 // the previous end of the space when this cycle started, and
8688 // a subsequent heap expansion caused the previously co-terminal
8689 // free block to be coalesced with the newly expanded portion,
8690 // thus rendering _limit a non-block-boundary making it dangerous
8691 // for the sweeper to step over and examine.
8692 void SweepClosure::lookahead_and_flush(FreeChunk* fc, size_t chunk_size) {
8693   assert(inFreeRange(), "Should only be called if currently in a free range.");
8694   HeapWord* const eob = ((HeapWord*)fc) + chunk_size;
8695   assert(_sp->used_region().contains(eob - 1),
8696          err_msg("eob = " PTR_FORMAT " out of bounds wrt _sp = [" PTR_FORMAT "," PTR_FORMAT ")"
8697                  " when examining fc = " PTR_FORMAT "(" SIZE_FORMAT ")",
                 eob, _sp->bottom(), _sp->end(), fc, chunk_size));
8699   if (eob >= _limit) {
8700     assert(eob == _limit || fc->is_free(), "Only a free chunk should allow us to cross over the limit");
8701     if (CMSTraceSweeper) {
8702       gclog_or_tty->print_cr("_limit " PTR_FORMAT " reached or crossed by block "
8703                              "[" PTR_FORMAT "," PTR_FORMAT ") in space "
8704                              "[" PTR_FORMAT "," PTR_FORMAT ")",
8705                              _limit, fc, eob, _sp->bottom(), _sp->end());
8706     }
8707     // Return the storage we are tracking back into the free lists.
8708     if (CMSTraceSweeper) {
8709       gclog_or_tty->print_cr("Flushing ... ");
8710     }
8711     assert(freeFinger() < eob, "Error");
    flush_cur_free_chunk(freeFinger(), pointer_delta(eob, freeFinger()));
8713   }
8714 }
8715 
8716 void SweepClosure::flush_cur_free_chunk(HeapWord* chunk, size_t size) {
8717   assert(inFreeRange(), "Should only be called if currently in a free range.");
8718   assert(size > 0,
8719     "A zero sized chunk cannot be added to the free lists.");
8720   if (!freeRangeInFreeLists()) {
8721     if (CMSTestInFreeList) {
8722       FreeChunk* fc = (FreeChunk*) chunk;
8723       fc->set_size(size);
8724       assert(!_sp->verify_chunk_in_free_list(fc),
8725         "chunk should not be in free lists yet");
8726     }
8727     if (CMSTraceSweeper) {
8728       gclog_or_tty->print_cr(" -- add free block 0x%x (%d) to free lists",
8729                     chunk, size);
8730     }
8731     // A new free range is going to be starting.  The current
8732     // free range has not been added to the free lists yet or
8733     // was removed so add it back.
8734     // If the current free range was coalesced, then the death
8735     // of the free range was recorded.  Record a birth now.
8736     if (lastFreeRangeCoalesced()) {
8737       _sp->coalBirth(size);
8738     }
8739     _sp->addChunkAndRepairOffsetTable(chunk, size,
8740             lastFreeRangeCoalesced());
8741   } else if (CMSTraceSweeper) {
8742     gclog_or_tty->print_cr("Already in free list: nothing to flush");
8743   }
8744   set_inFreeRange(false);
8745   set_freeRangeInFreeLists(false);
8746 }
8747 
8748 // We take a break if we've been at this for a while,
8749 // so as to avoid monopolizing the locks involved.
8750 void SweepClosure::do_yield_work(HeapWord* addr) {
8751   // Return current free chunk being used for coalescing (if any)
8752   // to the appropriate freelist.  After yielding, the next
8753   // free block encountered will start a coalescing range of
8754   // free blocks.  If the next free block is adjacent to the
8755   // chunk just flushed, they will need to wait for the next
8756   // sweep to be coalesced.
8757   if (inFreeRange()) {
8758     flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
8759   }
8760 
8761   // First give up the locks, then yield, then re-lock.
8762   // We should probably use a constructor/destructor idiom to
8763   // do this unlock/lock or modify the MutexUnlocker class to
8764   // serve our purpose. XXX
8765   assert_lock_strong(_bitMap->lock());
8766   assert_lock_strong(_freelistLock);
8767   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
8768          "CMS thread should hold CMS token");
8769   _bitMap->lock()->unlock();
8770   _freelistLock->unlock();
8771   ConcurrentMarkSweepThread::desynchronize(true);
8772   ConcurrentMarkSweepThread::acknowledge_yield_request();
8773   _collector->stopTimer();
8774   GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
8775   if (PrintCMSStatistics != 0) {
8776     _collector->incrementYields();
8777   }
8778   _collector->icms_wait();
8779 
8780   // See the comment in coordinator_yield()
8781   for (unsigned i = 0; i < CMSYieldSleepCount &&
8782                        ConcurrentMarkSweepThread::should_yield() &&
8783                        !CMSCollector::foregroundGCIsActive(); ++i) {
8784     os::sleep(Thread::current(), 1, false);
8785     ConcurrentMarkSweepThread::acknowledge_yield_request();
8786   }
8787 
8788   ConcurrentMarkSweepThread::synchronize(true);
8789   _freelistLock->lock();
8790   _bitMap->lock()->lock_without_safepoint_check();
8791   _collector->startTimer();
8792 }
8793 
8794 #ifndef PRODUCT
8795 // This is actually very useful in a product build if it can
8796 // be called from the debugger.  Compile it into the product
8797 // as needed.
8798 bool debug_verify_chunk_in_free_list(FreeChunk* fc) {
8799   return debug_cms_space->verify_chunk_in_free_list(fc);
8800 }
8801 #endif
8802 
8803 void SweepClosure::print_free_block_coalesced(FreeChunk* fc) const {
8804   if (CMSTraceSweeper) {
8805     gclog_or_tty->print_cr("Sweep:coal_free_blk " PTR_FORMAT " (" SIZE_FORMAT ")",
8806                            fc, fc->size());
8807   }
8808 }
8809 
8810 // CMSIsAliveClosure
8811 bool CMSIsAliveClosure::do_object_b(oop obj) {
8812   HeapWord* addr = (HeapWord*)obj;
8813   return addr != NULL &&
8814          (!_span.contains(addr) || _bit_map->isMarked(addr));
8815 }
8816 
8817 
8818 CMSKeepAliveClosure::CMSKeepAliveClosure( CMSCollector* collector,
8819                       MemRegion span,
8820                       CMSBitMap* bit_map, CMSMarkStack* mark_stack,
8821                       bool cpc):
8822   _collector(collector),
8823   _span(span),
8824   _bit_map(bit_map),
8825   _mark_stack(mark_stack),
8826   _concurrent_precleaning(cpc) {
8827   assert(!_span.is_empty(), "Empty span could spell trouble");
8828 }
8829 
8830 
8831 // CMSKeepAliveClosure: the serial version
8832 void CMSKeepAliveClosure::do_oop(oop obj) {
8833   HeapWord* addr = (HeapWord*)obj;
8834   if (_span.contains(addr) &&
8835       !_bit_map->isMarked(addr)) {
8836     _bit_map->mark(addr);
8837     bool simulate_overflow = false;
8838     NOT_PRODUCT(
8839       if (CMSMarkStackOverflowALot &&
8840           _collector->simulate_overflow()) {
8841         // simulate a stack overflow
8842         simulate_overflow = true;
8843       }
8844     )
8845     if (simulate_overflow || !_mark_stack->push(obj)) {
8846       if (_concurrent_precleaning) {
8847         // We dirty the overflown object and let the remark
8848         // phase deal with it.
8849         assert(_collector->overflow_list_is_empty(), "Error");
8850         // In the case of object arrays, we need to dirty all of
8851         // the cards that the object spans. No locking or atomics
8852         // are needed since no one else can be mutating the mod union
8853         // table.
8854         if (obj->is_objArray()) {
8855           size_t sz = obj->size();
8856           HeapWord* end_card_addr =
8857             (HeapWord*)round_to((intptr_t)(addr+sz), CardTableModRefBS::card_size);
8858           MemRegion redirty_range = MemRegion(addr, end_card_addr);
8859           assert(!redirty_range.is_empty(), "Arithmetical tautology");
8860           _collector->_modUnionTable.mark_range(redirty_range);
8861         } else {
8862           _collector->_modUnionTable.mark(addr);
8863         }
8864         _collector->_ser_kac_preclean_ovflw++;
8865       } else {
8866         _collector->push_on_overflow_list(obj);
8867         _collector->_ser_kac_ovflw++;
8868       }
8869     }
8870   }
8871 }
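
// Worked example of the redirtying arithmetic above (illustrative
// values, assuming a 64-bit VM with HeapWordSize == 8 and a 512-byte
// CardTableModRefBS::card_size):
//
//   addr          == (HeapWord*)0x1000  // array base, card-aligned here
//   sz            == 100                // words, i.e. 800 bytes
//   addr + sz     == (HeapWord*)0x1320  // one past the end of the array
//   end_card_addr == (HeapWord*)0x1400  // rounded up to a card boundary
//   redirty_range == [0x1000, 0x1400)   // spans two 512-byte cards
//
// Rounding up rather than down ensures that the last, partially covered
// card is also dirtied, so remark will rescan the array's tail.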
8872 
8873 void CMSKeepAliveClosure::do_oop(oop* p)       { CMSKeepAliveClosure::do_oop_work(p); }
8874 void CMSKeepAliveClosure::do_oop(narrowOop* p) { CMSKeepAliveClosure::do_oop_work(p); }
8875 
8876 // CMSParKeepAliveClosure: a parallel version of the above.
8877 // The work queues are private to each closure (thread),
8878 // but (may be) available for stealing by other threads.
8879 void CMSParKeepAliveClosure::do_oop(oop obj) {
8880   HeapWord* addr = (HeapWord*)obj;
8881   if (_span.contains(addr) &&
8882       !_bit_map->isMarked(addr)) {
8883     // In general, during recursive tracing, several threads
8884     // may be concurrently getting here; the first one to
    // "tag" it claims it.
8886     if (_bit_map->par_mark(addr)) {
8887       bool res = _work_queue->push(obj);
8888       assert(res, "Low water mark should be much less than capacity");
8889       // Do a recursive trim in the hope that this will keep
8890       // stack usage lower, but leave some oops for potential stealers
8891       trim_queue(_low_water_mark);
8892     } // Else, another thread got there first
8893   }
8894 }
8895 
8896 void CMSParKeepAliveClosure::do_oop(oop* p)       { CMSParKeepAliveClosure::do_oop_work(p); }
8897 void CMSParKeepAliveClosure::do_oop(narrowOop* p) { CMSParKeepAliveClosure::do_oop_work(p); }
8898 
8899 void CMSParKeepAliveClosure::trim_queue(uint max) {
8900   while (_work_queue->size() > max) {
8901     oop new_oop;
8902     if (_work_queue->pop_local(new_oop)) {
8903       assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
8904       assert(_bit_map->isMarked((HeapWord*)new_oop),
8905              "no white objects on this stack!");
8906       assert(_span.contains((HeapWord*)new_oop), "Out of bounds oop");
8907       // iterate over the oops in this oop, marking and pushing
8908       // the ones in CMS heap (i.e. in _span).
8909       new_oop->oop_iterate(&_mark_and_push);
8910     }
8911   }
8912 }
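
// For example (illustrative numbers): with _low_water_mark == 4, a
// thread that has just pushed a fifth oop pops and traces entries until
// only four remain. This bounds the local queue depth, and with it the
// memory needed for recursive tracing, while still leaving a few oops
// on the queue that idle worker threads can steal.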
8913 
8914 CMSInnerParMarkAndPushClosure::CMSInnerParMarkAndPushClosure(
8915                                 CMSCollector* collector,
8916                                 MemRegion span, CMSBitMap* bit_map,
8917                                 OopTaskQueue* work_queue):
8918   _collector(collector),
8919   _span(span),
8920   _bit_map(bit_map),
8921   _work_queue(work_queue) { }
8922 
8923 void CMSInnerParMarkAndPushClosure::do_oop(oop obj) {
8924   HeapWord* addr = (HeapWord*)obj;
8925   if (_span.contains(addr) &&
8926       !_bit_map->isMarked(addr)) {
8927     if (_bit_map->par_mark(addr)) {
8928       bool simulate_overflow = false;
8929       NOT_PRODUCT(
8930         if (CMSMarkStackOverflowALot &&
8931             _collector->par_simulate_overflow()) {
8932           // simulate a stack overflow
8933           simulate_overflow = true;
8934         }
8935       )
8936       if (simulate_overflow || !_work_queue->push(obj)) {
8937         _collector->par_push_on_overflow_list(obj);
8938         _collector->_par_kac_ovflw++;
8939       }
8940     } // Else another thread got there already
8941   }
8942 }
8943 
8944 void CMSInnerParMarkAndPushClosure::do_oop(oop* p)       { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
8945 void CMSInnerParMarkAndPushClosure::do_oop(narrowOop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
8946 
8947 //////////////////////////////////////////////////////////////////
8948 //  CMSExpansionCause                /////////////////////////////
8949 //////////////////////////////////////////////////////////////////
8950 const char* CMSExpansionCause::to_string(CMSExpansionCause::Cause cause) {
8951   switch (cause) {
8952     case _no_expansion:
8953       return "No expansion";
8954     case _satisfy_free_ratio:
8955       return "Free ratio";
8956     case _satisfy_promotion:
8957       return "Satisfy promotion";
8958     case _satisfy_allocation:
      return "Allocation";
8960     case _allocate_par_lab:
8961       return "Par LAB";
8962     case _allocate_par_spooling_space:
8963       return "Par Spooling Space";
8964     case _adaptive_size_policy:
8965       return "Ergonomics";
8966     default:
8967       return "unknown";
8968   }
8969 }
8970 
8971 void CMSDrainMarkingStackClosure::do_void() {
8972   // the max number to take from overflow list at a time
8973   const size_t num = _mark_stack->capacity()/4;
8974   assert(!_concurrent_precleaning || _collector->overflow_list_is_empty(),
8975          "Overflow list should be NULL during concurrent phases");
8976   while (!_mark_stack->isEmpty() ||
8977          // if stack is empty, check the overflow list
8978          _collector->take_from_overflow_list(num, _mark_stack)) {
8979     oop obj = _mark_stack->pop();
8980     HeapWord* addr = (HeapWord*)obj;
8981     assert(_span.contains(addr), "Should be within span");
8982     assert(_bit_map->isMarked(addr), "Should be marked");
8983     assert(obj->is_oop(), "Should be an oop");
8984     obj->oop_iterate(_keep_alive);
8985   }
8986 }
8987 
8988 void CMSParDrainMarkingStackClosure::do_void() {
8989   // drain queue
8990   trim_queue(0);
8991 }
8992 
8993 // Trim our work_queue so its length is below max at return
8994 void CMSParDrainMarkingStackClosure::trim_queue(uint max) {
8995   while (_work_queue->size() > max) {
8996     oop new_oop;
8997     if (_work_queue->pop_local(new_oop)) {
8998       assert(new_oop->is_oop(), "Expected an oop");
8999       assert(_bit_map->isMarked((HeapWord*)new_oop),
9000              "no white objects on this stack!");
9001       assert(_span.contains((HeapWord*)new_oop), "Out of bounds oop");
9002       // iterate over the oops in this oop, marking and pushing
9003       // the ones in CMS heap (i.e. in _span).
9004       new_oop->oop_iterate(&_mark_and_push);
9005     }
9006   }
9007 }
9008 
9009 ////////////////////////////////////////////////////////////////////
9010 // Support for Marking Stack Overflow list handling and related code
9011 ////////////////////////////////////////////////////////////////////
9012 // Much of the following code is similar in shape and spirit to the
// code used in ParNewGC. We should try to share that code
9014 // as much as possible in the future.
9015 
9016 #ifndef PRODUCT
// Debugging support for CMSMarkStackOverflowALot
9018 
// It's OK to call this multi-threaded; the worst thing
// that can happen is that we'll get a bunch of closely
// spaced simulated overflows, but that's OK; in fact it's
// probably good, as it would exercise the overflow code
// under contention.
9024 bool CMSCollector::simulate_overflow() {
9025   if (_overflow_counter-- <= 0) { // just being defensive
9026     _overflow_counter = CMSMarkStackOverflowInterval;
9027     return true;
9028   } else {
9029     return false;
9030   }
9031 }
9032 
9033 bool CMSCollector::par_simulate_overflow() {
9034   return simulate_overflow();
9035 }
9036 #endif
9037 
9038 // Single-threaded
9039 bool CMSCollector::take_from_overflow_list(size_t num, CMSMarkStack* stack) {
9040   assert(stack->isEmpty(), "Expected precondition");
  assert(stack->capacity() > num, "Shouldn't bite off more than we can chew");
9042   size_t i = num;
9043   oop  cur = _overflow_list;
9044   const markOop proto = markOopDesc::prototype();
9045   NOT_PRODUCT(ssize_t n = 0;)
9046   for (oop next; i > 0 && cur != NULL; cur = next, i--) {
9047     next = oop(cur->mark());
9048     cur->set_mark(proto);   // until proven otherwise
9049     assert(cur->is_oop(), "Should be an oop");
9050     bool res = stack->push(cur);
    assert(res, "Bit off more than we can chew?");
9052     NOT_PRODUCT(n++;)
9053   }
9054   _overflow_list = cur;
9055 #ifndef PRODUCT
9056   assert(_num_par_pushes >= n, "Too many pops?");
  _num_par_pushes -= n;
9058 #endif
9059   return !stack->isEmpty();
9060 }
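
// An illustration of the mark-word threading consumed above (conceptual,
// with made-up object names): if objects A, B and C are on the overflow
// list, then
//
//   _overflow_list == A,  A->mark() == B,  B->mark() == C,
//   C->mark() == NULL
//
// Taking a prefix with num == 2 pushes A and then B on the stack,
// restoring each one's mark word to the prototype value as it goes, and
// leaves _overflow_list pointing at C.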
9061 
9062 #define BUSY  (oop(0x1aff1aff))
9063 // (MT-safe) Get a prefix of at most "num" from the list.
9064 // The overflow list is chained through the mark word of
9065 // each object in the list. We fetch the entire list,
9066 // break off a prefix of the right size and return the
9067 // remainder. If other threads try to take objects from
9068 // the overflow list at that time, they will wait for
9069 // some time to see if data becomes available. If (and
9070 // only if) another thread places one or more object(s)
9071 // on the global list before we have returned the suffix
9072 // to the global list, we will walk down our local list
9073 // to find its end and append the global list to
9074 // our suffix before returning it. This suffix walk can
9075 // prove to be expensive (quadratic in the amount of traffic)
9076 // when there are many objects in the overflow list and
9077 // there is much producer-consumer contention on the list.
9078 // *NOTE*: The overflow list manipulation code here and
// in ParNewGeneration:: is very similar in shape,
9080 // except that in the ParNew case we use the old (from/eden)
9081 // copy of the object to thread the list via its klass word.
9082 // Because of the common code, if you make any changes in
9083 // the code below, please check the ParNew version to see if
9084 // similar changes might be needed.
9085 // CR 6797058 has been filed to consolidate the common code.
9086 bool CMSCollector::par_take_from_overflow_list(size_t num,
9087                                                OopTaskQueue* work_q,
9088                                                int no_of_gc_threads) {
9089   assert(work_q->size() == 0, "First empty local work queue");
  assert(num < work_q->max_elems(), "Can't bite off more than we can chew");
9091   if (_overflow_list == NULL) {
9092     return false;
9093   }
9094   // Grab the entire list; we'll put back a suffix
9095   oop prefix = (oop)Atomic::xchg_ptr(BUSY, &_overflow_list);
9096   Thread* tid = Thread::current();
9097   // Before "no_of_gc_threads" was introduced CMSOverflowSpinCount was
9098   // set to ParallelGCThreads.
9099   size_t CMSOverflowSpinCount = (size_t) no_of_gc_threads; // was ParallelGCThreads;
9100   size_t sleep_time_millis = MAX2((size_t)1, num/100);
9101   // If the list is busy, we spin for a short while,
9102   // sleeping between attempts to get the list.
9103   for (size_t spin = 0; prefix == BUSY && spin < CMSOverflowSpinCount; spin++) {
9104     os::sleep(tid, sleep_time_millis, false);
9105     if (_overflow_list == NULL) {
9106       // Nothing left to take
9107       return false;
9108     } else if (_overflow_list != BUSY) {
9109       // Try and grab the prefix
9110       prefix = (oop)Atomic::xchg_ptr(BUSY, &_overflow_list);
9111     }
9112   }
9113   // If the list was found to be empty, or we spun long
9114   // enough, we give up and return empty-handed. If we leave
9115   // the list in the BUSY state below, it must be the case that
9116   // some other thread holds the overflow list and will set it
9117   // to a non-BUSY state in the future.
9118   if (prefix == NULL || prefix == BUSY) {
9119      // Nothing to take or waited long enough
9120      if (prefix == NULL) {
9121        // Write back the NULL in case we overwrote it with BUSY above
9122        // and it is still the same value.
9123        (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
9124      }
9125      return false;
9126   }
9127   assert(prefix != NULL && prefix != BUSY, "Error");
9128   size_t i = num;
9129   oop cur = prefix;
9130   // Walk down the first "num" objects, unless we reach the end.
9131   for (; i > 1 && cur->mark() != NULL; cur = oop(cur->mark()), i--);
9132   if (cur->mark() == NULL) {
9133     // We have "num" or fewer elements in the list, so there
9134     // is nothing to return to the global list.
9135     // Write back the NULL in lieu of the BUSY we wrote
9136     // above, if it is still the same value.
9137     if (_overflow_list == BUSY) {
9138       (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
9139     }
9140   } else {
    // Chop off the suffix and return it to the global list.
9142     assert(cur->mark() != BUSY, "Error");
9143     oop suffix_head = cur->mark(); // suffix will be put back on global list
9144     cur->set_mark(NULL);           // break off suffix
    // It's possible that the list is still in the empty (BUSY) state
9146     // we left it in a short while ago; in that case we may be
9147     // able to place back the suffix without incurring the cost
9148     // of a walk down the list.
9149     oop observed_overflow_list = _overflow_list;
9150     oop cur_overflow_list = observed_overflow_list;
9151     bool attached = false;
9152     while (observed_overflow_list == BUSY || observed_overflow_list == NULL) {
9153       observed_overflow_list =
9154         (oop) Atomic::cmpxchg_ptr(suffix_head, &_overflow_list, cur_overflow_list);
9155       if (cur_overflow_list == observed_overflow_list) {
9156         attached = true;
9157         break;
9158       } else cur_overflow_list = observed_overflow_list;
9159     }
9160     if (!attached) {
9161       // Too bad, someone else sneaked in (at least) an element; we'll need
9162       // to do a splice. Find tail of suffix so we can prepend suffix to global
9163       // list.
9164       for (cur = suffix_head; cur->mark() != NULL; cur = (oop)(cur->mark()));
9165       oop suffix_tail = cur;
9166       assert(suffix_tail != NULL && suffix_tail->mark() == NULL,
9167              "Tautology");
9168       observed_overflow_list = _overflow_list;
9169       do {
9170         cur_overflow_list = observed_overflow_list;
9171         if (cur_overflow_list != BUSY) {
9172           // Do the splice ...
9173           suffix_tail->set_mark(markOop(cur_overflow_list));
9174         } else { // cur_overflow_list == BUSY
9175           suffix_tail->set_mark(NULL);
9176         }
9177         // ... and try to place spliced list back on overflow_list ...
9178         observed_overflow_list =
9179           (oop) Atomic::cmpxchg_ptr(suffix_head, &_overflow_list, cur_overflow_list);
9180       } while (cur_overflow_list != observed_overflow_list);
9181       // ... until we have succeeded in doing so.
9182     }
9183   }
9184 
9185   // Push the prefix elements on work_q
9186   assert(prefix != NULL, "control point invariant");
9187   const markOop proto = markOopDesc::prototype();
9188   oop next;
9189   NOT_PRODUCT(ssize_t n = 0;)
9190   for (cur = prefix; cur != NULL; cur = next) {
9191     next = oop(cur->mark());
9192     cur->set_mark(proto);   // until proven otherwise
9193     assert(cur->is_oop(), "Should be an oop");
9194     bool res = work_q->push(cur);
9195     assert(res, "Bit off more than we can chew?");
9196     NOT_PRODUCT(n++;)
9197   }
9198 #ifndef PRODUCT
9199   assert(_num_par_pushes >= n, "Too many pops?");
9200   Atomic::add_ptr(-(intptr_t)n, &_num_par_pushes);
9201 #endif
9202   return true;
9203 }
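
// A condensed sketch of the claim/splice protocol implemented above
// (illustrative only: 'head', 'next', 'xchg', 'cmpxchg' and
// 'splice_onto_head' are stand-in names rather than HotSpot APIs, the
// spin/sleep retry loop is omitted, and the real code threads the list
// through mark words instead of a next field; cmpxchg(a, v, e) installs
// v at *a iff *a == e and returns the value it found):
//
//   Node* prefix = xchg(&head, BUSY);         // claim the entire list
//   if (prefix == NULL) {                     // list was empty:
//     cmpxchg(&head, NULL, BUSY);             //  clear our BUSY marker
//     return false;                           //  and leave empty-handed
//   }
//   if (prefix == BUSY) return false;         // another thread owns it
//   Node* cur = prefix;
//   for (size_t i = num; i > 1 && cur->next != NULL; i--)
//     cur = cur->next;                        // walk to end of prefix
//   Node* suffix = cur->next;
//   cur->next = NULL;                         // detach the prefix
//   if (suffix == NULL) {                     // nothing to give back:
//     cmpxchg(&head, NULL, BUSY);             //  just clear our marker
//   } else if (cmpxchg(&head, suffix, BUSY) != BUSY) {
//     splice_onto_head(suffix);               // fast path failed: walk
//   }                                         //  to the suffix tail and
//                                             //  CAS the splice in
//
// The possibly-quadratic cost called out above lives in
// splice_onto_head(): each failed fast path walks the entire suffix to
// find its tail before retrying the CAS.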
9204 
9205 // Single-threaded
9206 void CMSCollector::push_on_overflow_list(oop p) {
9207   NOT_PRODUCT(_num_par_pushes++;)
9208   assert(p->is_oop(), "Not an oop");
9209   preserve_mark_if_necessary(p);
9210   p->set_mark((markOop)_overflow_list);
9211   _overflow_list = p;
9212 }
9213 
9214 // Multi-threaded; use CAS to prepend to overflow list
9215 void CMSCollector::par_push_on_overflow_list(oop p) {
9216   NOT_PRODUCT(Atomic::inc_ptr(&_num_par_pushes);)
9217   assert(p->is_oop(), "Not an oop");
9218   par_preserve_mark_if_necessary(p);
9219   oop observed_overflow_list = _overflow_list;
9220   oop cur_overflow_list;
9221   do {
9222     cur_overflow_list = observed_overflow_list;
9223     if (cur_overflow_list != BUSY) {
9224       p->set_mark(markOop(cur_overflow_list));
9225     } else {
9226       p->set_mark(NULL);
9227     }
9228     observed_overflow_list =
9229       (oop) Atomic::cmpxchg_ptr(p, &_overflow_list, cur_overflow_list);
9230   } while (cur_overflow_list != observed_overflow_list);
9231 }
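
// Note the BUSY case above: when the whole list has been claimed by a
// concurrent par_take_from_overflow_list(), the sentinel must not be
// threaded into p's mark word. Pushing p with a NULL mark makes it the
// head (and tail) of a fresh list, which the claimant's splice code is
// prepared to find when it returns its suffix.
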
9232 #undef BUSY
9233 
// Single-threaded
9235 // General Note on GrowableArray: pushes may silently fail
9236 // because we are (temporarily) out of C-heap for expanding
9237 // the stack. The problem is quite ubiquitous and affects
9238 // a lot of code in the JVM. The prudent thing for GrowableArray
9239 // to do (for now) is to exit with an error. However, that may
9240 // be too draconian in some cases because the caller may be
9241 // able to recover without much harm. For such cases, we
9242 // should probably introduce a "soft_push" method which returns
9243 // an indication of success or failure with the assumption that
9244 // the caller may be able to recover from a failure; code in
9245 // the VM can then be changed, incrementally, to deal with such
// failures where possible, thus incrementally hardening the VM
// in such low-resource situations.
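//
// A hedged sketch of what such a hypothetical soft_push() might look
// like; nothing below exists in GrowableArray today, and try_grow() is
// an assumed helper that expands the backing store without aborting the
// VM on a C-heap failure:
//
//   template <typename E>
//   bool GrowableArray<E>::soft_push(const E& elem) {
//     if (_len == _max && !try_grow()) {
//       return false;         // out of C-heap: let the caller recover
//     }
//     _data[_len++] = elem;   // same effect as the hard push()
//     return true;
//   }
//
// Callers prepared to recover, as the keep-alive closures above do by
// falling back to the overflow list when a mark stack push fails, would
// test the return value instead of relying on push() to exit with an
// error.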
9248 void CMSCollector::preserve_mark_work(oop p, markOop m) {
9249   _preserved_oop_stack.push(p);
9250   _preserved_mark_stack.push(m);
9251   assert(m == p->mark(), "Mark word changed");
9252   assert(_preserved_oop_stack.size() == _preserved_mark_stack.size(),
9253          "bijection");
9254 }
9255 
// Single-threaded
9257 void CMSCollector::preserve_mark_if_necessary(oop p) {
9258   markOop m = p->mark();
9259   if (m->must_be_preserved(p)) {
9260     preserve_mark_work(p, m);
9261   }
9262 }
9263 
9264 void CMSCollector::par_preserve_mark_if_necessary(oop p) {
9265   markOop m = p->mark();
9266   if (m->must_be_preserved(p)) {
9267     MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
9268     // Even though we read the mark word without holding
9269     // the lock, we are assured that it will not change
9270     // because we "own" this oop, so no other thread can
9271     // be trying to push it on the overflow list; see
9272     // the assertion in preserve_mark_work() that checks
9273     // that m == p->mark().
9274     preserve_mark_work(p, m);
9275   }
9276 }
9277 
// We should be able to do this multi-threaded,
// with a chunk of the stack being a task (this is
// correct because each oop only ever appears
// once in the overflow list). However, it's
// not very easy to completely overlap this with
// other operations, so it will generally not be done
// until all work has been completed. Because we
// expect the preserved oop stack (set) to be small,
// it's probably fine to do this single-threaded.
// We can explore cleverer concurrent/overlapped/parallel
// processing of preserved marks if we feel the
// need for this in the future. Stack overflow should
// be so rare in practice, and its effect on
// performance when it does happen so great, that the
// cost of restoring marks will likely just be in the
// noise anyway.
9293 void CMSCollector::restore_preserved_marks_if_any() {
9294   assert(SafepointSynchronize::is_at_safepoint(),
9295          "world should be stopped");
9296   assert(Thread::current()->is_ConcurrentGC_thread() ||
9297          Thread::current()->is_VM_thread(),
9298          "should be single-threaded");
9299   assert(_preserved_oop_stack.size() == _preserved_mark_stack.size(),
9300          "bijection");
9301 
9302   while (!_preserved_oop_stack.is_empty()) {
9303     oop p = _preserved_oop_stack.pop();
9304     assert(p->is_oop(), "Should be an oop");
9305     assert(_span.contains(p), "oop should be in _span");
9306     assert(p->mark() == markOopDesc::prototype(),
9307            "Set when taken from overflow list");
9308     markOop m = _preserved_mark_stack.pop();
9309     p->set_mark(m);
9310   }
9311   assert(_preserved_mark_stack.is_empty() && _preserved_oop_stack.is_empty(),
9312          "stacks were cleared above");
9313 }
9314 
9315 #ifndef PRODUCT
9316 bool CMSCollector::no_preserved_marks() const {
9317   return _preserved_mark_stack.is_empty() && _preserved_oop_stack.is_empty();
9318 }
9319 #endif
9320 
CMSAdaptiveSizePolicy* ASConcurrentMarkSweepGeneration::cms_size_policy() const {
9323   GenCollectedHeap* gch = (GenCollectedHeap*) GenCollectedHeap::heap();
9324   CMSAdaptiveSizePolicy* size_policy =
9325     (CMSAdaptiveSizePolicy*) gch->gen_policy()->size_policy();
9326   assert(size_policy->is_gc_cms_adaptive_size_policy(),
9327     "Wrong type for size policy");
9328   return size_policy;
9329 }
9330 
void ASConcurrentMarkSweepGeneration::resize(size_t cur_promo_size,
                                             size_t desired_promo_size) {
9333   if (cur_promo_size < desired_promo_size) {
9334     size_t expand_bytes = desired_promo_size - cur_promo_size;
9335     if (PrintAdaptiveSizePolicy && Verbose) {
9336       gclog_or_tty->print_cr(" ASConcurrentMarkSweepGeneration::resize "
9337         "Expanding tenured generation by " SIZE_FORMAT " (bytes)",
9338         expand_bytes);
9339     }
9340     expand(expand_bytes,
9341            MinHeapDeltaBytes,
9342            CMSExpansionCause::_adaptive_size_policy);
9343   } else if (desired_promo_size < cur_promo_size) {
9344     size_t shrink_bytes = cur_promo_size - desired_promo_size;
9345     if (PrintAdaptiveSizePolicy && Verbose) {
9346       gclog_or_tty->print_cr(" ASConcurrentMarkSweepGeneration::resize "
9347         "Shrinking tenured generation by " SIZE_FORMAT " (bytes)",
9348         shrink_bytes);
9349     }
9350     shrink(shrink_bytes);
9351   }
9352 }
9353 
9354 CMSGCAdaptivePolicyCounters* ASConcurrentMarkSweepGeneration::gc_adaptive_policy_counters() {
9355   GenCollectedHeap* gch = GenCollectedHeap::heap();
9356   CMSGCAdaptivePolicyCounters* counters =
9357     (CMSGCAdaptivePolicyCounters*) gch->collector_policy()->counters();
9358   assert(counters->kind() == GCPolicyCounters::CMSGCAdaptivePolicyCountersKind,
9359     "Wrong kind of counters");
9360   return counters;
9361 }
9362 
9363 
9364 void ASConcurrentMarkSweepGeneration::update_counters() {
9365   if (UsePerfData) {
9366     _space_counters->update_all();
9367     _gen_counters->update_all();
9368     CMSGCAdaptivePolicyCounters* counters = gc_adaptive_policy_counters();
9369     GenCollectedHeap* gch = GenCollectedHeap::heap();
9370     CMSGCStats* gc_stats_l = (CMSGCStats*) gc_stats();
9371     assert(gc_stats_l->kind() == GCStats::CMSGCStatsKind,
9372       "Wrong gc statistics type");
9373     counters->update_counters(gc_stats_l);
9374   }
9375 }
9376 
9377 void ASConcurrentMarkSweepGeneration::update_counters(size_t used) {
9378   if (UsePerfData) {
9379     _space_counters->update_used(used);
9380     _space_counters->update_capacity();
9381     _gen_counters->update_all();
9382 
9383     CMSGCAdaptivePolicyCounters* counters = gc_adaptive_policy_counters();
9384     GenCollectedHeap* gch = GenCollectedHeap::heap();
9385     CMSGCStats* gc_stats_l = (CMSGCStats*) gc_stats();
9386     assert(gc_stats_l->kind() == GCStats::CMSGCStatsKind,
9387       "Wrong gc statistics type");
9388     counters->update_counters(gc_stats_l);
9389   }
9390 }
9391 
9392 void ASConcurrentMarkSweepGeneration::shrink_by(size_t desired_bytes) {
9393   assert_locked_or_safepoint(Heap_lock);
9394   assert_lock_strong(freelistLock());
9395   HeapWord* old_end = _cmsSpace->end();
9396   HeapWord* unallocated_start = _cmsSpace->unallocated_block();
9397   assert(old_end >= unallocated_start, "Miscalculation of unallocated_start");
9398   FreeChunk* chunk_at_end = find_chunk_at_end();
9399   if (chunk_at_end == NULL) {
9400     // No room to shrink
9401     if (PrintGCDetails && Verbose) {
9402       gclog_or_tty->print_cr("No room to shrink: old_end  "
9403         PTR_FORMAT "  unallocated_start  " PTR_FORMAT
9404         " chunk_at_end  " PTR_FORMAT,
9405         old_end, unallocated_start, chunk_at_end);
9406     }
9407     return;
9408   } else {
9409 
9410     // Find the chunk at the end of the space and determine
9411     // how much it can be shrunk.
9412     size_t shrinkable_size_in_bytes = chunk_at_end->size();
9413     size_t aligned_shrinkable_size_in_bytes =
9414       align_size_down(shrinkable_size_in_bytes, os::vm_page_size());
9415     assert(unallocated_start <= (HeapWord*) chunk_at_end->end(),
9416       "Inconsistent chunk at end of space");
9417     size_t bytes = MIN2(desired_bytes, aligned_shrinkable_size_in_bytes);
9418     size_t word_size_before = heap_word_size(_virtual_space.committed_size());
9419 
9420     // Shrink the underlying space
9421     _virtual_space.shrink_by(bytes);
9422     if (PrintGCDetails && Verbose) {
9423       gclog_or_tty->print_cr("ConcurrentMarkSweepGeneration::shrink_by:"
9424         " desired_bytes " SIZE_FORMAT
9425         " shrinkable_size_in_bytes " SIZE_FORMAT
9426         " aligned_shrinkable_size_in_bytes " SIZE_FORMAT
9427         "  bytes  " SIZE_FORMAT,
9428         desired_bytes, shrinkable_size_in_bytes,
9429         aligned_shrinkable_size_in_bytes, bytes);
      gclog_or_tty->print_cr("          old_end  " PTR_FORMAT
        "  unallocated_start  " PTR_FORMAT,
        old_end, unallocated_start);
9433     }
9434 
9435     // If the space did shrink (shrinking is not guaranteed),
9436     // shrink the chunk at the end by the appropriate amount.
9437     if (((HeapWord*)_virtual_space.high()) < old_end) {
9438       size_t new_word_size =
9439         heap_word_size(_virtual_space.committed_size());
9440 
9441       // Have to remove the chunk from the dictionary because it is changing
      // size and might be somewhere else in the dictionary.
9443 
9444       // Get the chunk at end, shrink it, and put it
9445       // back.
9446       _cmsSpace->removeChunkFromDictionary(chunk_at_end);
9447       size_t word_size_change = word_size_before - new_word_size;
9448       size_t chunk_at_end_old_size = chunk_at_end->size();
9449       assert(chunk_at_end_old_size >= word_size_change,
9450         "Shrink is too large");
9451       chunk_at_end->set_size(chunk_at_end_old_size -
9452                           word_size_change);
9453       _cmsSpace->freed((HeapWord*) chunk_at_end->end(),
9454         word_size_change);
9455 
9456       _cmsSpace->returnChunkToDictionary(chunk_at_end);
9457 
9458       MemRegion mr(_cmsSpace->bottom(), new_word_size);
9459       _bts->resize(new_word_size);  // resize the block offset shared array
9460       Universe::heap()->barrier_set()->resize_covered_region(mr);
9461       _cmsSpace->assert_locked();
9462       _cmsSpace->set_end((HeapWord*)_virtual_space.high());
9463 
9464       NOT_PRODUCT(_cmsSpace->dictionary()->verify());
9465 
9466       // update the space and generation capacity counters
9467       if (UsePerfData) {
9468         _space_counters->update_capacity();
9469         _gen_counters->update_all();
9470       }
9471 
9472       if (Verbose && PrintGCDetails) {
9473         size_t new_mem_size = _virtual_space.committed_size();
9474         size_t old_mem_size = new_mem_size + bytes;
9475         gclog_or_tty->print_cr("Shrinking %s from " SIZE_FORMAT "K by " SIZE_FORMAT "K to " SIZE_FORMAT "K",
9476                       name(), old_mem_size/K, bytes/K, new_mem_size/K);
9477       }
9478     }
9479 
9480     assert(_cmsSpace->unallocated_block() <= _cmsSpace->end(),
9481       "Inconsistency at end of space");
9482     assert(chunk_at_end->end() == (uintptr_t*) _cmsSpace->end(),
9483       "Shrinking is inconsistent");
9484     return;
9485   }
9486 }
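
// Worked example of the shrink arithmetic above (illustrative numbers,
// assuming a 4K page size and 8-byte heap words): a 10000-byte chunk at
// the end of the space aligns down to 8192 shrinkable bytes, so with
// desired_bytes == 16384 the virtual space shrinks by
// MIN2(16384, 8192) == 8192 bytes. The end chunk is then pulled from
// the dictionary, its size reduced by the corresponding 1024-word
// change, and returned, after which the block offset array, the barrier
// set's covered region and the space's end are resized to match.
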
// Transfer some number of overflowed objects to the usual marking
// stack. Return true if some objects were transferred.
9489 bool MarkRefsIntoAndScanClosure::take_from_overflow_list() {
9490   size_t num = MIN2((size_t)(_mark_stack->capacity() - _mark_stack->length())/4,
9491                     (size_t)ParGCDesiredObjsFromOverflowList);
9492 
9493   bool res = _collector->take_from_overflow_list(num, _mark_stack);
9494   assert(_collector->overflow_list_is_empty() || res,
9495          "If list is not empty, we should have taken something");
9496   assert(!res || !_mark_stack->isEmpty(),
9497          "If we took something, it should now be on our stack");
9498   return res;
9499 }
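
// For example (illustrative numbers): with a mark stack of capacity
// 4096 currently holding 1024 entries, the quarter-of-free-space term
// is (4096 - 1024) / 4 == 768, so at most
// MIN2(768, ParGCDesiredObjsFromOverflowList) objects are transferred
// in one call, leaving ample headroom on the stack.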
9500 
9501 size_t MarkDeadObjectsClosure::do_blk(HeapWord* addr) {
9502   size_t res = _sp->block_size_no_stall(addr, _collector);
9503   if (_sp->block_is_obj(addr)) {
9504     if (_live_bit_map->isMarked(addr)) {
9505       // It can't have been dead in a previous cycle
9506       guarantee(!_dead_bit_map->isMarked(addr), "No resurrection!");
9507     } else {
9508       _dead_bit_map->mark(addr);      // mark the dead object
9509     }
9510   }
9511   // Could be 0, if the block size could not be computed without stalling.
9512   return res;
9513 }
9514 
TraceCMSMemoryManagerStats::TraceCMSMemoryManagerStats(CMSCollector::CollectorState phase,
                                                       GCCause::Cause cause):
  TraceMemoryManagerStats() {
9517   switch (phase) {
9518     case CMSCollector::InitialMarking:
      initialize(true  /* fullGC */,
                 cause /* cause of the GC */,
                 true  /* recordGCBeginTime */,
                 true  /* recordPreGCUsage */,
                 false /* recordPeakUsage */,
                 false /* recordPostGCUsage */,
                 true  /* recordAccumulatedGCTime */,
                 false /* recordGCEndTime */,
                 false /* countCollection */);
9528       break;
9529 
9530     case CMSCollector::FinalMarking:
      initialize(true  /* fullGC */,
                 cause /* cause of the GC */,
                 false /* recordGCBeginTime */,
                 false /* recordPreGCUsage */,
                 false /* recordPeakUsage */,
                 false /* recordPostGCUsage */,
                 true  /* recordAccumulatedGCTime */,
                 false /* recordGCEndTime */,
                 false /* countCollection */);
9540       break;
9541 
9542     case CMSCollector::Sweeping:
      initialize(true  /* fullGC */,
                 cause /* cause of the GC */,
                 false /* recordGCBeginTime */,
                 false /* recordPreGCUsage */,
                 true  /* recordPeakUsage */,
                 true  /* recordPostGCUsage */,
                 false /* recordAccumulatedGCTime */,
                 true  /* recordGCEndTime */,
                 true  /* countCollection */);
9552       break;
9553 
9554     default:
9555       ShouldNotReachHere();
9556   }
9557 }