/*
 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

// ConcurrentMarkSweepGeneration is in support of a concurrent
// mark-sweep old generation in the Detlefs-Printezis--Boehm-Demers-Schenker
// style. We assume, for now, that this generation is always the
// seniormost generation (modulo the PermGeneration), and for simplicity
// in the first implementation, that this generation is a single compactible
// space. Neither of these restrictions appears essential; they will be
// relaxed in the future when more time is available to implement the
// greater generality (and there is a need for it).
//
// Concurrent mode failures are currently handled by
// means of a sliding mark-compact.

class CMSAdaptiveSizePolicy;
class CMSConcMarkingTask;
class CMSGCAdaptivePolicyCounters;
class ConcurrentMarkSweepGeneration;
class ConcurrentMarkSweepPolicy;
class ConcurrentMarkSweepThread;
class CompactibleFreeListSpace;
class FreeChunk;
class PromotionInfo;
class ScanMarkedObjectsAgainCarefullyClosure;

// A generic CMS bit map. It's the basis for both the CMS marking bit map
// as well as for the mod union table (in each case only a subset of the
// methods are used). This is essentially a wrapper around the BitMap class,
// with one bit per (1 << _shifter) HeapWords. (For the marking bit map
// we have _shifter == 0, and for the mod union table we have
// _shifter == CardTableModRefBS::card_shift - LogHeapWordSize.)
// XXX 64-bit issues in BitMap?
class CMSBitMap VALUE_OBJ_CLASS_SPEC {
  friend class VMStructs;

  HeapWord* _bmStartWord;   // base address of range covered by map
  size_t    _bmWordSize;    // map size (in #HeapWords covered)
  const int _shifter;       // shifts to convert HeapWord to bit position
  VirtualSpace _virtual_space; // underlying the bit map
  BitMap    _bm;            // the bit map itself
 public:
  Mutex* const _lock;       // mutex protecting _bm;

 public:
  // constructor
  CMSBitMap(int shifter, int mutex_rank, const char* mutex_name);

  // allocates the actual storage for the map
  bool allocate(MemRegion mr);
  // field getter
  Mutex* lock() const { return _lock; }
  // locking verifier convenience function
  void assert_locked() const PRODUCT_RETURN;

  // inquiries
  HeapWord* startWord()   const { return _bmStartWord; }
  size_t    sizeInWords() const { return _bmWordSize;  }
  size_t    sizeInBits()  const { return _bm.size();   }
  // the following is one past the last word in space
  HeapWord* endWord()     const { return _bmStartWord + _bmWordSize; }

  // reading marks
  bool isMarked(HeapWord* addr) const;
  bool par_isMarked(HeapWord* addr) const; // does not do locking checks
  bool isUnmarked(HeapWord* addr) const;
  bool isAllClear() const;

  // writing marks
  void mark(HeapWord* addr);
  // For marking by parallel GC threads;
  // returns true if we did, false if another thread did
  bool par_mark(HeapWord* addr);

  void mark_range(MemRegion mr);
  void par_mark_range(MemRegion mr);
  void mark_large_range(MemRegion mr);
  void par_mark_large_range(MemRegion mr);
  void par_clear(HeapWord* addr); // For unmarking by parallel GC threads.
  void clear_range(MemRegion mr);
  void par_clear_range(MemRegion mr);
  void clear_large_range(MemRegion mr);
  void par_clear_large_range(MemRegion mr);
  void clear_all();
  void clear_all_incrementally();  // Not yet implemented!!

  NOT_PRODUCT(
    // checks the memory region for validity
    void region_invariant(MemRegion mr);
  )

  // iteration
  void iterate(BitMapClosure* cl) {
    _bm.iterate(cl);
  }
  void iterate(BitMapClosure* cl, HeapWord* left, HeapWord* right);
  void dirty_range_iterate_clear(MemRegionClosure* cl);
  void dirty_range_iterate_clear(MemRegion mr, MemRegionClosure* cl);

  // auxiliary support for iteration
  HeapWord* getNextMarkedWordAddress(HeapWord* addr) const;
  HeapWord* getNextMarkedWordAddress(HeapWord* start_addr,
                                     HeapWord* end_addr) const;
  HeapWord* getNextUnmarkedWordAddress(HeapWord* addr) const;
  HeapWord* getNextUnmarkedWordAddress(HeapWord* start_addr,
                                       HeapWord* end_addr) const;
  MemRegion getAndClearMarkedRegion(HeapWord* addr);
  MemRegion getAndClearMarkedRegion(HeapWord* start_addr,
                                    HeapWord* end_addr);

  // conversion utilities
  HeapWord* offsetToHeapWord(size_t offset) const;
  size_t    heapWordToOffset(HeapWord* addr) const;
  size_t    heapWordDiffToOffsetDiff(size_t diff) const;

  // debugging
  // is this address range covered by the bit-map?
  NOT_PRODUCT(
    bool covers(MemRegion mr) const;
    bool covers(HeapWord* start, size_t size = 0) const;
  )
  void verifyNoOneBitsInRange(HeapWord* left, HeapWord* right) PRODUCT_RETURN;
};
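
// Illustrative sketch (not part of the collector): the address <-> bit
// conversions the _shifter supports, mirroring heapWordToOffset() and
// offsetToHeapWord() above; 'addr' and 'offset' are local example names.
//
//   size_t    offset = (size_t)(addr - _bmStartWord) >> _shifter;
//   HeapWord* addr   = _bmStartWord + (offset << _shifter);
//
// With _shifter == 0 (marking bit map) each bit covers one HeapWord; with
// _shifter == CardTableModRefBS::card_shift - LogHeapWordSize (mod union
// table) each bit covers a card's worth of HeapWords.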

// Represents a marking stack used by the CMS collector.
// Ideally this should be GrowableArray<> just like MSC's marking stack(s).
class CMSMarkStack: public CHeapObj {
  //
  friend class CMSCollector;   // to get at expansion stats further below
  //

  VirtualSpace _virtual_space;  // space for the stack
  oop*   _base;      // bottom of stack
  size_t _index;     // one more than last occupied index
  size_t _capacity;  // max #elements
  Mutex  _par_lock;  // an advisory lock used in case of parallel access
  NOT_PRODUCT(size_t _max_depth;)  // max depth plumbed during run

 protected:
  size_t _hit_limit;      // we hit max stack size limit
  size_t _failed_double;  // we failed expansion before hitting limit

 public:
  CMSMarkStack():
    _par_lock(Mutex::event, "CMSMarkStack._par_lock", true),
    _hit_limit(0),
    _failed_double(0) {}

  bool allocate(size_t size);

  size_t capacity() const { return _capacity; }

  oop pop() {
    if (!isEmpty()) {
      return _base[--_index];
    }
    return NULL;
  }

  bool push(oop ptr) {
    if (isFull()) {
      return false;
    } else {
      _base[_index++] = ptr;
      NOT_PRODUCT(_max_depth = MAX2(_max_depth, _index));
      return true;
    }
  }

  bool isEmpty() const { return _index == 0; }
  bool isFull()  const {
    assert(_index <= _capacity, "buffer overflow");
    return _index == _capacity;
  }

  size_t length() { return _index; }

  // "Parallel versions" of some of the above
  oop par_pop() {
    // lock and pop
    MutexLockerEx x(&_par_lock, Mutex::_no_safepoint_check_flag);
    return pop();
  }

  bool par_push(oop ptr) {
    // lock and push
    MutexLockerEx x(&_par_lock, Mutex::_no_safepoint_check_flag);
    return push(ptr);
  }

  // Forcibly reset the stack, losing all of its contents.
  void reset() {
    _index = 0;
  }

  // Expand the stack, typically in response to an overflow condition
  void expand();

  // Compute the least valued stack element.
  oop least_value(HeapWord* low) {
     oop least = (oop)low;
     for (size_t i = 0; i < _index; i++) {
       least = MIN2(least, _base[i]);
     }
     return least;
  }

  // Exposed here to allow stack expansion in the parallel case
  Mutex* par_lock() { return &_par_lock; }
};
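
// A minimal usage sketch of the API above (illustrative only; 'obj' is a
// hypothetical valid oop and real callers live in the collector):
//
//   CMSMarkStack stack;
//   if (stack.allocate(1024)) {        // reserve backing store
//     if (!stack.push(obj)) {          // push fails when the stack is full,
//       stack.expand();                // typically answered by expansion
//     }
//     while (!stack.isEmpty()) {
//       oop o = stack.pop();           // pop returns NULL once empty
//       // ... process o ...
//     }
//   }
//
// Parallel callers use par_push()/par_pop(), which take _par_lock around
// the same operations.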

class CardTableRS;
class CMSParGCThreadState;

class ModUnionClosure: public MemRegionClosure {
 protected:
  CMSBitMap* _t;
 public:
  ModUnionClosure(CMSBitMap* t): _t(t) { }
  void do_MemRegion(MemRegion mr);
};

class ModUnionClosurePar: public ModUnionClosure {
 public:
  ModUnionClosurePar(CMSBitMap* t): ModUnionClosure(t) { }
  void do_MemRegion(MemRegion mr);
};

// Survivor Chunk Array in support of parallelization of
// Survivor Space rescan.
class ChunkArray: public CHeapObj {
  size_t _index;
  size_t _capacity;
  HeapWord** _array;   // storage for array

 public:
  ChunkArray() : _index(0), _capacity(0), _array(NULL) {}
  ChunkArray(HeapWord** a, size_t c):
    _index(0), _capacity(c), _array(a) {}

  HeapWord** array() { return _array; }
  void set_array(HeapWord** a) { _array = a; }

  size_t capacity() { return _capacity; }
  void set_capacity(size_t c) { _capacity = c; }

  size_t end() {
    assert(_index <= capacity(), "_index out of bounds");
    return _index;
  }  // exclusive

  HeapWord* nth(size_t n) {
    assert(n < end(), "Out of bounds access");
    return _array[n];
  }

  void reset() {
    _index = 0;
  }

  void record_sample(HeapWord* p, size_t sz) {
    // For now we do not do anything with the size
    if (_index < _capacity) {
      _array[_index++] = p;
    }
  }
};
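
// Usage sketch: during a young-gen collection each parallel GC thread
// records the start of each survivor-space PLAB it allocates via
// record_sample(); at remark time those boundaries partition the survivor
// space into chunks for parallel rescan (see merge_survivor_plab_arrays()
// in CMSCollector below).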

//
// Timing, allocation and promotion statistics for gc scheduling and incremental
// mode pacing.  Most statistics are exponential averages.
//
class CMSStats VALUE_OBJ_CLASS_SPEC {
 private:
  ConcurrentMarkSweepGeneration* const _cms_gen;   // The cms (old) gen.

  // The following are exponential averages with factor alpha (a
  // percentage in [0, 100]):
  //   avg = ((100 - alpha) * avg + alpha * cur_sample) / 100
  //
  //   The durations measure:  end_time[n] - start_time[n]
  //   The periods measure:    start_time[n] - start_time[n-1]
  //
  // The cms period and duration include only concurrent collections; time spent
  // in foreground cms collections due to System.gc() or because of a failure to
  // keep up is not included.
  //
  // There are 3 alphas to "bootstrap" the statistics.  The _saved_alpha is the
  // real value, but is used only after the first period.  A value of 100 is
  // used for the first sample so it gets the entire weight.
  unsigned int _saved_alpha; // 0-100
  unsigned int _gc0_alpha;
  unsigned int _cms_alpha;

  double _gc0_duration;
  double _gc0_period;
  size_t _gc0_promoted;         // bytes promoted per gc0
  double _cms_duration;
  double _cms_duration_pre_sweep; // time from initiation to start of sweep
  double _cms_duration_per_mb;
  double _cms_period;
  size_t _cms_allocated;        // bytes of direct allocation per gc0 period

  // Timers.
  elapsedTimer _cms_timer;
  TimeStamp    _gc0_begin_time;
  TimeStamp    _cms_begin_time;
  TimeStamp    _cms_end_time;

  // Snapshots of the amount used in the CMS generation.
  size_t _cms_used_at_gc0_begin;
  size_t _cms_used_at_gc0_end;
  size_t _cms_used_at_cms_begin;

  // Used to prevent the duty cycle from being reduced in the middle of a cms
  // cycle.
  bool _allow_duty_cycle_reduction;

  enum {
    _GC0_VALID = 0x1,
    _CMS_VALID = 0x2,
    _ALL_VALID = _GC0_VALID | _CMS_VALID
  };

  unsigned int _valid_bits;

  unsigned int _icms_duty_cycle;        // icms duty cycle (0-100).

 protected:

  // Return a duty cycle that avoids wild oscillations, by limiting the amount
  // of change between old_duty_cycle and new_duty_cycle (the latter is treated
  // as a recommended value).
  static unsigned int icms_damped_duty_cycle(unsigned int old_duty_cycle,
                                             unsigned int new_duty_cycle);
  unsigned int icms_update_duty_cycle_impl();

  // In support of adjusting of cms trigger ratios based on history
  // of concurrent mode failure.
  double cms_free_adjustment_factor(size_t free) const;
  void   adjust_cms_free_adjustment_factor(bool fail, size_t free);

 public:
  CMSStats(ConcurrentMarkSweepGeneration* cms_gen,
           unsigned int alpha = CMSExpAvgFactor);

  // Whether or not the statistics contain valid data; higher level statistics
  // cannot be called until this returns true (they require at least one young
  // gen and one cms cycle to have completed).
  bool valid() const;

  // Record statistics.
  void record_gc0_begin();
  void record_gc0_end(size_t cms_gen_bytes_used);
  void record_cms_begin();
  void record_cms_end();

  // Allow management of the cms timer, which must be stopped/started around
  // yield points.
  elapsedTimer& cms_timer()     { return _cms_timer; }
  void start_cms_timer()        { _cms_timer.start(); }
  void stop_cms_timer()         { _cms_timer.stop(); }

  // Basic statistics; units are seconds or bytes.
  double gc0_period() const     { return _gc0_period; }
  double gc0_duration() const   { return _gc0_duration; }
  size_t gc0_promoted() const   { return _gc0_promoted; }
  double cms_period() const          { return _cms_period; }
  double cms_duration() const        { return _cms_duration; }
  double cms_duration_per_mb() const { return _cms_duration_per_mb; }
  size_t cms_allocated() const       { return _cms_allocated; }

  size_t cms_used_at_gc0_end() const { return _cms_used_at_gc0_end; }

  // Seconds since the last background cms cycle began or ended.
  double cms_time_since_begin() const;
  double cms_time_since_end() const;

  // Higher level statistics--caller must check that valid() returns true before
  // calling.

  // Returns bytes promoted per second of wall clock time.
  double promotion_rate() const;

  // Returns bytes directly allocated per second of wall clock time.
  double cms_allocation_rate() const;

  // Rate at which space in the cms generation is being consumed (sum of the
  // above two).
  double cms_consumption_rate() const;

  // Returns an estimate of the number of seconds until the cms generation will
  // fill up, assuming no collection work is done.
  double time_until_cms_gen_full() const;

  // Returns an estimate of the number of seconds remaining until
  // the cms generation collection should start.
  double time_until_cms_start() const;

  // End of higher level statistics.

  // Returns the cms incremental mode duty cycle, as a percentage (0-100).
  unsigned int icms_duty_cycle() const { return _icms_duty_cycle; }

  // Update the duty cycle and return the new value.
  unsigned int icms_update_duty_cycle();

  // Debugging.
  void print_on(outputStream* st) const PRODUCT_RETURN;
  void print() const { print_on(gclog_or_tty); }
};
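
// A sketch of the exponential-average update described in the comments
// above (alpha is a percentage in [0, 100]; the helper name is
// hypothetical, not part of this class):
//
//   static double exp_avg(double avg, double cur_sample, unsigned int alpha) {
//     return ((100.0 - alpha) * avg + alpha * cur_sample) / 100.0;
//   }
//
// Passing alpha == 100 for the first sample gives that sample the entire
// weight, which is how the statistics are bootstrapped.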

// A closure related to weak references processing which
// we embed in the CMSCollector, since we need to pass
// it to the reference processor for secondary filtering
// of references based on reachability of referent;
// see role of _is_alive_non_header closure in the
// ReferenceProcessor class.
// For objects in the CMS generation, this closure checks
// if the object is "live" (reachable). Used in weak
// reference processing.
class CMSIsAliveClosure: public BoolObjectClosure {
  const MemRegion  _span;
  const CMSBitMap* _bit_map;

  friend class CMSCollector;
 public:
  CMSIsAliveClosure(MemRegion span,
                    CMSBitMap* bit_map):
    _span(span),
    _bit_map(bit_map) {
    assert(!span.is_empty(), "Empty span could spell trouble");
  }

  void do_object(oop obj) {
    assert(false, "not to be invoked");
  }

  bool do_object_b(oop obj);
};
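
// A sketch of the liveness test do_object_b() performs (the definition
// lives in the .cpp file): an object outside _span is conservatively
// treated as live; within the span, liveness is simply the mark bit.
//
//   HeapWord* addr = (HeapWord*)obj;
//   return addr != NULL &&
//          (!_span.contains(addr) || _bit_map->isMarked(addr));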


// Implements AbstractRefProcTaskExecutor for CMS.
class CMSRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
public:

  CMSRefProcTaskExecutor(CMSCollector& collector)
    : _collector(collector)
  { }

  // Executes a task using worker threads.
  virtual void execute(ProcessTask& task);
  virtual void execute(EnqueueTask& task);
private:
  CMSCollector& _collector;
};


class CMSCollector: public CHeapObj {
  friend class VMStructs;
  friend class ConcurrentMarkSweepThread;
  friend class ConcurrentMarkSweepGeneration;
  friend class CompactibleFreeListSpace;
  friend class CMSParRemarkTask;
  friend class CMSConcMarkingTask;
  friend class CMSRefProcTaskProxy;
  friend class CMSRefProcTaskExecutor;
  friend class ScanMarkedObjectsAgainCarefullyClosure;  // for sampling eden
  friend class SurvivorSpacePrecleanClosure;            // --- ditto -------
  friend class PushOrMarkClosure;             // to access _restart_addr
  friend class Par_PushOrMarkClosure;         // to access _restart_addr
  friend class MarkFromRootsClosure;          //  -- ditto --
                                              // ... and for clearing cards
  friend class Par_MarkFromRootsClosure;      //  to access _restart_addr
                                              // ... and for clearing cards
  friend class Par_ConcMarkingClosure;        //  to access _restart_addr etc.
  friend class MarkFromRootsVerifyClosure;    // to access _restart_addr
  friend class PushAndMarkVerifyClosure;      //  -- ditto --
  friend class MarkRefsIntoAndScanClosure;    // to access _overflow_list
  friend class PushAndMarkClosure;            //  -- ditto --
  friend class Par_PushAndMarkClosure;        //  -- ditto --
  friend class CMSKeepAliveClosure;           //  -- ditto --
  friend class CMSDrainMarkingStackClosure;   //  -- ditto --
  friend class CMSInnerParMarkAndPushClosure; //  -- ditto --
  NOT_PRODUCT(friend class ScanMarkedObjectsAgainClosure;) //  assertion on _overflow_list
  friend class ReleaseForegroundGC;  // to access _foregroundGCShouldWait
  friend class VM_CMS_Operation;
  friend class VM_CMS_Initial_Mark;
  friend class VM_CMS_Final_Remark;
  friend class TraceCMSMemoryManagerStats;

 private:
  jlong _time_of_last_gc;
  void update_time_of_last_gc(jlong now) {
    _time_of_last_gc = now;
  }

  OopTaskQueueSet* _task_queues;

  // Overflow list of grey objects, threaded through mark-word
  // Manipulated with CAS in the parallel/multi-threaded case.
  oop _overflow_list;
  // The following array-pair keeps track of mark words
  // displaced for accommodating overflow list above.
  // This code will likely be revisited under RFE#4922830.
  GrowableArray<oop>*     _preserved_oop_stack;
  GrowableArray<markOop>* _preserved_mark_stack;

  int*             _hash_seed;

  // In support of multi-threaded concurrent phases
  YieldingFlexibleWorkGang* _conc_workers;

  // Performance Counters
  CollectorCounters* _gc_counters;

  // Initialization Errors
  bool _completed_initialization;

  // In support of ExplicitGCInvokesConcurrent
  static   bool _full_gc_requested;
  unsigned int  _collection_count_start;

  // Should we unload classes this concurrent cycle?
  bool _should_unload_classes;
  unsigned int  _concurrent_cycles_since_last_unload;
  unsigned int concurrent_cycles_since_last_unload() const {
    return _concurrent_cycles_since_last_unload;
  }
  // Did we (allow) unload classes in the previous concurrent cycle?
  bool unloaded_classes_last_cycle() const {
    return concurrent_cycles_since_last_unload() == 0;
  }
  // Root scanning options for perm gen
  int _roots_scanning_options;
  int roots_scanning_options() const      { return _roots_scanning_options; }
  void add_root_scanning_option(int o)    { _roots_scanning_options |= o;   }
  void remove_root_scanning_option(int o) { _roots_scanning_options &= ~o;  }

  // Verification support
  CMSBitMap     _verification_mark_bm;
  void verify_after_remark_work_1();
  void verify_after_remark_work_2();

  // true if any verification flag is on.
  bool _verifying;
  bool verifying() const { return _verifying; }
  void set_verifying(bool v) { _verifying = v; }

  // Collector policy
  ConcurrentMarkSweepPolicy* _collector_policy;
  ConcurrentMarkSweepPolicy* collector_policy() { return _collector_policy; }

  // XXX Move these to CMSStats ??? FIX ME !!!
  elapsedTimer _inter_sweep_timer;   // time between sweeps
  elapsedTimer _intra_sweep_timer;   // time _in_ sweeps
  // padded decaying average estimates of the above
  AdaptivePaddedAverage _inter_sweep_estimate;
  AdaptivePaddedAverage _intra_sweep_estimate;

 protected:
  ConcurrentMarkSweepGeneration* _cmsGen;  // old gen (CMS)
  ConcurrentMarkSweepGeneration* _permGen; // perm gen
  MemRegion                      _span;    // span covering above two
  CardTableRS*                   _ct;      // card table

  // CMS marking support structures
  CMSBitMap     _markBitMap;
  CMSBitMap     _modUnionTable;
  CMSMarkStack  _markStack;
  CMSMarkStack  _revisitStack;            // used to keep track of klassKlass objects
                                          // to revisit
  CMSBitMap     _perm_gen_verify_bit_map; // Mark bit map for perm gen verification support.

  HeapWord*     _restart_addr; // in support of marking stack overflow
  void          lower_restart_addr(HeapWord* low);

  // Counters in support of marking stack / work queue overflow handling:
  // a non-zero value indicates certain types of overflow events during
  // the current CMS cycle and could lead to stack resizing efforts at
  // an opportune future time.
  size_t        _ser_pmc_preclean_ovflw;
  size_t        _ser_pmc_remark_ovflw;
  size_t        _par_pmc_remark_ovflw;
  size_t        _ser_kac_preclean_ovflw;
  size_t        _ser_kac_ovflw;
  size_t        _par_kac_ovflw;
  NOT_PRODUCT(ssize_t _num_par_pushes;)

  // ("Weak") Reference processing support
  ReferenceProcessor*            _ref_processor;
  CMSIsAliveClosure              _is_alive_closure;
      // keep this textually after _markBitMap and _span; c'tor dependency

  ConcurrentMarkSweepThread*     _cmsThread;   // the thread doing the work
  ModUnionClosure    _modUnionClosure;
  ModUnionClosurePar _modUnionClosurePar;

  // CMS abstract state machine
  // initial_state: Idling
  // next_state(Idling)            = {Marking}
  // next_state(Marking)           = {Precleaning, Sweeping}
  // next_state(Precleaning)       = {AbortablePreclean, FinalMarking}
  // next_state(AbortablePreclean) = {FinalMarking}
  // next_state(FinalMarking)      = {Sweeping}
  // next_state(Sweeping)          = {Resizing}
  // next_state(Resizing)          = {Resetting}
  // next_state(Resetting)         = {Idling}
  // The numeric values below are chosen so that:
  // . _collectorState <= Idling ==  post-sweep && pre-mark
  // . _collectorState in (Idling, Sweeping) == {initial,final}marking ||
  //                                            precleaning || abortablePreclean
 public:
  enum CollectorState {
    Resizing            = 0,
    Resetting           = 1,
    Idling              = 2,
    InitialMarking      = 3,
    Marking             = 4,
    Precleaning         = 5,
    AbortablePreclean   = 6,
    FinalMarking        = 7,
    Sweeping            = 8
  };
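
  // For example, an uninterrupted background cycle steps through the
  // transitions documented above as:
  //   Idling -> Marking -> Precleaning [-> AbortablePreclean]
  //     -> FinalMarking -> Sweeping -> Resizing -> Resetting -> Idling
  // (InitialMarking and FinalMarking correspond to the stop-world
  // initial-mark and final-remark checkpoints within that cycle).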
 protected:
  static CollectorState _collectorState;

  // State related to prologue/epilogue invocation for my generations
  bool _between_prologue_and_epilogue;

  // Signalling/State related to coordination between fore- and background GC
  // Note: When the baton has been passed from background GC to foreground GC,
  // _foregroundGCIsActive is true and _foregroundGCShouldWait is false.
  static bool _foregroundGCIsActive;    // true iff foreground collector is active or
                                        // wants to go active
  static bool _foregroundGCShouldWait;  // true iff background GC is active and has not
                                        // yet passed the baton to the foreground GC

  // Support for CMSScheduleRemark (abortable preclean)
  bool _abort_preclean;
  bool _start_sampling;

  int    _numYields;
  size_t _numDirtyCards;
  size_t _sweep_count;
  // number of full gc's since the last concurrent gc.
  uint   _full_gcs_since_conc_gc;

  // occupancy used for bootstrapping stats
  double _bootstrap_occupancy;

  // timer
  elapsedTimer _timer;

  // Timing, allocation and promotion statistics, used for scheduling.
  CMSStats      _stats;

  // Allocation limits installed in the young gen, used only in
  // CMSIncrementalMode.  When an allocation in the young gen would cross one of
  // these limits, the cms generation is notified and the cms thread is started
  // or stopped, respectively.
  HeapWord*     _icms_start_limit;
  HeapWord*     _icms_stop_limit;

  enum CMS_op_type {
    CMS_op_checkpointRootsInitial,
    CMS_op_checkpointRootsFinal
  };

  void do_CMS_operation(CMS_op_type op);
  bool stop_world_and_do(CMS_op_type op);

  OopTaskQueueSet* task_queues() { return _task_queues; }
  int*             hash_seed(int i) { return &_hash_seed[i]; }
  YieldingFlexibleWorkGang* conc_workers() { return _conc_workers; }

  // Support for parallelizing Eden rescan in CMS remark phase
  void sample_eden(); // ... sample Eden space top

 private:
  // Support for parallelizing young gen rescan in CMS remark phase
  Generation* _young_gen;  // the younger gen
  HeapWord** _top_addr;    // ... Top of Eden
  HeapWord** _end_addr;    // ... End of Eden
  HeapWord** _eden_chunk_array; // ... Eden partitioning array
  size_t     _eden_chunk_index; // ... top (exclusive) of array
  size_t     _eden_chunk_capacity;  // ... max entries in array

  // Support for parallelizing survivor space rescan
  HeapWord** _survivor_chunk_array;
  size_t     _survivor_chunk_index;
  size_t     _survivor_chunk_capacity;
  size_t*    _cursor;
  ChunkArray* _survivor_plab_array;

  // Support for marking stack overflow handling
  bool take_from_overflow_list(size_t num, CMSMarkStack* to_stack);
  bool par_take_from_overflow_list(size_t num, OopTaskQueue* to_work_q);
  void push_on_overflow_list(oop p);
  void par_push_on_overflow_list(oop p);
  // the following is, obviously, not, in general, "MT-stable"
  bool overflow_list_is_empty() const;

  void preserve_mark_if_necessary(oop p);
  void par_preserve_mark_if_necessary(oop p);
  void preserve_mark_work(oop p, markOop m);
  void restore_preserved_marks_if_any();
  NOT_PRODUCT(bool no_preserved_marks() const;)
  // in support of testing overflow code
  NOT_PRODUCT(int _overflow_counter;)
  NOT_PRODUCT(bool simulate_overflow();)       // sequential
  NOT_PRODUCT(bool par_simulate_overflow();)   // MT version

  // CMS work methods
  void checkpointRootsInitialWork(bool asynch); // initial checkpoint work

  // a return value of false indicates failure due to stack overflow
  bool markFromRootsWork(bool asynch);  // concurrent marking work

 public:   // FIX ME!!! only for testing
  bool do_marking_st(bool asynch);      // single-threaded marking
  bool do_marking_mt(bool asynch);      // multi-threaded  marking

 private:

  // concurrent precleaning work
  size_t preclean_mod_union_table(ConcurrentMarkSweepGeneration* gen,
                                  ScanMarkedObjectsAgainCarefullyClosure* cl);
  size_t preclean_card_table(ConcurrentMarkSweepGeneration* gen,
                             ScanMarkedObjectsAgainCarefullyClosure* cl);
  // Does precleaning work, returning a quantity indicative of
  // the amount of "useful work" done.
  size_t preclean_work(bool clean_refs, bool clean_survivors);
  void abortable_preclean(); // Preclean while looking for possible abort
  void initialize_sequential_subtasks_for_young_gen_rescan(int i);
  // Helper function for above; merge-sorts the per-thread plab samples
  void merge_survivor_plab_arrays(ContiguousSpace* surv);
  // Resets (i.e. clears) the per-thread plab sample vectors
  void reset_survivor_plab_arrays();

  // final (second) checkpoint work
  void checkpointRootsFinalWork(bool asynch, bool clear_all_soft_refs,
                                bool init_mark_was_synchronous);
  // work routine for parallel version of remark
  void do_remark_parallel();
  // work routine for non-parallel version of remark
  void do_remark_non_parallel();
  // reference processing work routine (during second checkpoint)
  void refProcessingWork(bool asynch, bool clear_all_soft_refs);

  // concurrent sweeping work
  void sweepWork(ConcurrentMarkSweepGeneration* gen, bool asynch);

  // (concurrent) resetting of support data structures
  void reset(bool asynch);

  // Clear _expansion_cause fields of constituent generations
  void clear_expansion_cause();

  // An auxiliary method used to record the ends of
  // used regions of each generation to limit the extent of sweep
  void save_sweep_limits();

  // Resize the generations included in the collector.
  void compute_new_size();

  // A work method used by foreground collection to determine
  // what type of collection (compacting or not, continuing or fresh)
  // it should do.
  void decide_foreground_collection_type(bool clear_all_soft_refs,
    bool* should_compact, bool* should_start_over);

  // A work method used by the foreground collector to do
  // a mark-sweep-compact.
  void do_compaction_work(bool clear_all_soft_refs);

  // A work method used by the foreground collector to do
  // a mark-sweep, after taking over from a possibly on-going
  // concurrent mark-sweep collection.
  void do_mark_sweep_work(bool clear_all_soft_refs,
    CollectorState first_state, bool should_start_over);

  // If the background GC is active, acquire control from the background
  // GC and do the collection.
  void acquire_control_and_collect(bool   full, bool clear_all_soft_refs);

  // For synchronizing passing of control from background to foreground
  // GC.  waitForForegroundGC() is called by the background
  // collector.  If it had to wait for a foreground collection,
  // it returns true and the background collection should assume
  // that the collection was finished by the foreground
  // collector.
  bool waitForForegroundGC();

  // Incremental mode triggering:  recompute the icms duty cycle and set the
  // allocation limits in the young gen.
  void icms_update_allocation_limits();

  size_t block_size_using_printezis_bits(HeapWord* addr) const;
  size_t block_size_if_printezis_bits(HeapWord* addr) const;
  HeapWord* next_card_start_after_block(HeapWord* addr) const;

  void setup_cms_unloading_and_verification_state();
 public:
  CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
               ConcurrentMarkSweepGeneration* permGen,
               CardTableRS*                   ct,
               ConcurrentMarkSweepPolicy*     cp);
  ConcurrentMarkSweepThread* cmsThread() { return _cmsThread; }

  ReferenceProcessor* ref_processor() { return _ref_processor; }
  void ref_processor_init();

  Mutex* bitMapLock()        const { return _markBitMap.lock();    }
  static CollectorState abstract_state() { return _collectorState;  }

  bool should_abort_preclean() const; // Whether preclean should be aborted.
  size_t get_eden_used() const;
  size_t get_eden_capacity() const;

  ConcurrentMarkSweepGeneration* cmsGen() { return _cmsGen; }

  // locking checks
  NOT_PRODUCT(static bool have_cms_token();)

  // XXXPERM bool should_collect(bool full, size_t size, bool tlab);
  bool shouldConcurrentCollect();

  void collect(bool   full,
               bool   clear_all_soft_refs,
               size_t size,
               bool   tlab);
  void collect_in_background(bool clear_all_soft_refs);
  void collect_in_foreground(bool clear_all_soft_refs);

  // In support of ExplicitGCInvokesConcurrent
  static void request_full_gc(unsigned int full_gc_count);
  // Should we unload classes in a particular concurrent cycle?
  bool should_unload_classes() const {
    return _should_unload_classes;
  }
  bool update_should_unload_classes();

  void direct_allocated(HeapWord* start, size_t size);

  // Object is dead if not marked and current phase is sweeping.
  bool is_dead_obj(oop obj) const;

  // After a promotion (of "start"), do any necessary marking.
  // If "par", then it's being done by a parallel GC thread.
  // The last two args indicate if we need precise marking
  // and if so the size of the object so it can be dirtied
  // in its entirety.
  void promoted(bool par, HeapWord* start,
                bool is_obj_array, size_t obj_size);

  HeapWord* allocation_limit_reached(Space* space, HeapWord* top,
                                     size_t word_size);

  void getFreelistLocks() const;
  void releaseFreelistLocks() const;
  bool haveFreelistLocks() const;

  // GC prologue and epilogue
  void gc_prologue(bool full);
  void gc_epilogue(bool full);

  jlong time_of_last_gc(jlong now) {
    if (_collectorState <= Idling) {
      // gc not in progress
      return _time_of_last_gc;
    } else {
      // collection in progress
      return now;
    }
  }

  // Support for parallel remark of survivor space
  void* get_data_recorder(int thr_num);

  CMSBitMap* markBitMap()  { return &_markBitMap; }
  void directAllocated(HeapWord* start, size_t size);

  // main CMS steps and related support
  void checkpointRootsInitial(bool asynch);
  bool markFromRoots(bool asynch);  // a return value of false indicates failure
                                    // due to stack overflow
  void preclean();
  void checkpointRootsFinal(bool asynch, bool clear_all_soft_refs,
                            bool init_mark_was_synchronous);
  void sweep(bool asynch);

  // Check that the currently executing thread is the expected
  // one (foreground collector or background collector).
  static void check_correct_thread_executing() PRODUCT_RETURN;
  // XXXPERM void print_statistics()           PRODUCT_RETURN;

  bool is_cms_reachable(HeapWord* addr);

  // Performance Counter Support
  CollectorCounters* counters()    { return _gc_counters; }

  // timer stuff
  void    startTimer() { assert(!_timer.is_active(), "Error"); _timer.start();   }
  void    stopTimer()  { assert( _timer.is_active(), "Error"); _timer.stop();    }
  void    resetTimer() { assert(!_timer.is_active(), "Error"); _timer.reset();   }
  double  timerValue() { assert(!_timer.is_active(), "Error"); return _timer.seconds(); }

  int  yields()          { return _numYields; }
  void resetYields()     { _numYields = 0;    }
  void incrementYields() { _numYields++;      }
  void resetNumDirtyCards()               { _numDirtyCards = 0; }
  void incrementNumDirtyCards(size_t num) { _numDirtyCards += num; }
  size_t  numDirtyCards()                 { return _numDirtyCards; }

  static bool foregroundGCShouldWait() { return _foregroundGCShouldWait; }
  static void set_foregroundGCShouldWait(bool v) { _foregroundGCShouldWait = v; }
  static bool foregroundGCIsActive() { return _foregroundGCIsActive; }
  static void set_foregroundGCIsActive(bool v) { _foregroundGCIsActive = v; }
  size_t sweep_count() const             { return _sweep_count; }
  void   increment_sweep_count()         { _sweep_count++; }

  // Timers/stats for gc scheduling and incremental mode pacing.
  CMSStats& stats() { return _stats; }

  // Convenience methods that check whether CMSIncrementalMode is enabled and
  // forward to the corresponding methods in ConcurrentMarkSweepThread.
  static void start_icms();
  static void stop_icms();    // Called at the end of the cms cycle.
  static void disable_icms(); // Called before a foreground collection.
  static void enable_icms();  // Called after a foreground collection.
  void icms_wait();           // Called at yield points.

  // Adaptive size policy
  CMSAdaptiveSizePolicy* size_policy();
  CMSGCAdaptivePolicyCounters* gc_adaptive_policy_counters();

  // debugging
  void verify(bool);
  bool verify_after_remark();
  void verify_ok_to_terminate() const PRODUCT_RETURN;
  void verify_work_stacks_empty() const PRODUCT_RETURN;
  void verify_overflow_empty() const PRODUCT_RETURN;

  // convenience methods in support of debugging
  static const size_t skip_header_HeapWords() PRODUCT_RETURN0;
  HeapWord* block_start(const void* p) const PRODUCT_RETURN0;

  // accessors
  CMSMarkStack* verification_mark_stack() { return &_markStack; }
  CMSBitMap*    verification_mark_bm()    { return &_verification_mark_bm; }

  // Get the bit map with a perm gen "deadness" information.
  CMSBitMap* perm_gen_verify_bit_map()       { return &_perm_gen_verify_bit_map; }

  // Initialization errors
  bool completed_initialization() { return _completed_initialization; }
};

class CMSExpansionCause : public AllStatic  {
 public:
  enum Cause {
    _no_expansion,
    _satisfy_free_ratio,
    _satisfy_promotion,
    _satisfy_allocation,
    _allocate_par_lab,
    _allocate_par_spooling_space,
    _adaptive_size_policy
  };
  // Return a string describing the cause of the expansion.
  static const char* to_string(CMSExpansionCause::Cause cause);
};

class ConcurrentMarkSweepGeneration: public CardGeneration {
  friend class VMStructs;
  friend class ConcurrentMarkSweepThread;
  friend class ConcurrentMarkSweep;
  friend class CMSCollector;
 protected:
  static CMSCollector*       _collector; // the collector that collects us
  CompactibleFreeListSpace*  _cmsSpace;  // underlying space (only one for now)

  // Performance Counters
  GenerationCounters*      _gen_counters;
  GSpaceCounters*          _space_counters;

  // Words directly allocated, used by CMSStats.
  size_t _direct_allocated_words;

  // Non-product stat counters
  NOT_PRODUCT(
    int _numObjectsPromoted;
    int _numWordsPromoted;
    int _numObjectsAllocated;
    int _numWordsAllocated;
  )

  // Used for sizing decisions
  bool _incremental_collection_failed;
  bool incremental_collection_failed() {
    return _incremental_collection_failed;
  }
  void set_incremental_collection_failed() {
    _incremental_collection_failed = true;
  }
  void clear_incremental_collection_failed() {
    _incremental_collection_failed = false;
  }

  // accessors
  void set_expansion_cause(CMSExpansionCause::Cause v) { _expansion_cause = v; }
  CMSExpansionCause::Cause expansion_cause() const { return _expansion_cause; }

 private:
  // For parallel young-gen GC support.
  CMSParGCThreadState** _par_gc_thread_states;

  // Reason generation was expanded
  CMSExpansionCause::Cause _expansion_cause;

  // In support of MinChunkSize being larger than min object size
  const double _dilatation_factor;

  enum CollectionTypes {
    Concurrent_collection_type          = 0,
    MS_foreground_collection_type       = 1,
    MSC_foreground_collection_type      = 2,
    Unknown_collection_type             = 3
  };

  CollectionTypes _debug_collection_type;

  // Fraction of current occupancy at which to start a CMS collection which
  // will collect this generation (at least).
  double _initiating_occupancy;

 protected:
  // Shrink generation by specified size (returns false if unable to shrink)
  virtual void shrink_by(size_t bytes);

  // Update statistics for GC
  virtual void update_gc_stats(int level, bool full);

  // Maximum available space in the generation (including uncommitted)
  // space.
  size_t max_available() const;

  // getter and initializer for _initiating_occupancy field.
  double initiating_occupancy() const { return _initiating_occupancy; }
  void   init_initiating_occupancy(intx io, intx tr);

 public:
  ConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size,
                                int level, CardTableRS* ct,
                                bool use_adaptive_freelists,
                                FreeBlockDictionary::DictionaryChoice);

  // Accessors
  CMSCollector* collector() const { return _collector; }
  static void set_collector(CMSCollector* collector) {
    assert(_collector == NULL, "already set");
    _collector = collector;
  }
  CompactibleFreeListSpace*  cmsSpace() const { return _cmsSpace;  }

  Mutex* freelistLock() const;

  virtual Generation::Name kind() { return Generation::ConcurrentMarkSweep; }

  // Adaptive size policy
  CMSAdaptiveSizePolicy* size_policy();

  bool refs_discovery_is_atomic() const { return false; }
  bool refs_discovery_is_mt()     const {
    // Note: CMS does MT-discovery during the parallel-remark
    // phases. Use ReferenceProcessorMTMutator to make refs
    // discovery MT-safe during such phases or other parallel
    // discovery phases in the future. This may all go away
    // if/when we decide that refs discovery is sufficiently
    // rare that the cost of the CAS's involved is in the
    // noise. That's a measurement that should be done, and
    // the code simplified if that turns out to be the case.
    return false;
  }

  // Override
  virtual void ref_processor_init();

  // Grow generation by specified size (returns false if unable to grow)
  bool grow_by(size_t bytes);
  // Grow generation to reserved size.
  bool grow_to_reserved();

  void clear_expansion_cause() { _expansion_cause = CMSExpansionCause::_no_expansion; }

  // Space enquiries
  size_t capacity() const;
  size_t used() const;
  size_t free() const;
  double occupancy() const { return ((double)used())/((double)capacity()); }
  size_t contiguous_available() const;
  size_t unsafe_max_alloc_nogc() const;

  // over-rides
  MemRegion used_region() const;
  MemRegion used_region_at_save_marks() const;

  // Does a "full" (forced) collection invoked on this generation collect
  // all younger generations as well? Note that the second conjunct is a
  // hack to allow the collection of the younger gen first if the flag is
  // set. This is better than using the policy's should_collect_gen0_first()
  // since that causes us to do an extra unnecessary pair of restart-&-stop-world.
  virtual bool full_collects_younger_generations() const {
    return UseCMSCompactAtFullCollection && !CollectGen0First;
  }

  void space_iterate(SpaceClosure* blk, bool usedOnly = false);

  // Support for compaction
  CompactibleSpace* first_compaction_space() const;
  // Adjust quantities in the generation affected by
  // the compaction.
  void reset_after_compaction();

  // Allocation support
  HeapWord* allocate(size_t size, bool tlab);
  HeapWord* have_lock_and_allocate(size_t size, bool tlab);
  oop       promote(oop obj, size_t obj_size);
  HeapWord* par_allocate(size_t size, bool tlab) {
    return allocate(size, tlab);
  }

  // Incremental mode triggering.
  HeapWord* allocation_limit_reached(Space* space, HeapWord* top,
                                     size_t word_size);

  // Used by CMSStats to track direct allocation.  The value is sampled and
  // reset after each young gen collection.
  size_t direct_allocated_words() const { return _direct_allocated_words; }
  void reset_direct_allocated_words()   { _direct_allocated_words = 0; }

  // Overrides for parallel promotion.
  virtual oop par_promote(int thread_num,
                          oop obj, markOop m, size_t word_sz);
  // This one should not be called for CMS.
  virtual void par_promote_alloc_undo(int thread_num,
                                      HeapWord* obj, size_t word_sz);
  virtual void par_promote_alloc_done(int thread_num);
  virtual void par_oop_since_save_marks_iterate_done(int thread_num);

  virtual bool promotion_attempt_is_safe(size_t promotion_in_bytes,
    bool younger_handles_promotion_failure) const;

  // Inform this (non-young) generation that a promotion failure was
  // encountered during a collection of a younger generation that
  // promotes into this generation.
  virtual void promotion_failure_occurred();

  bool should_collect(bool full, size_t size, bool tlab);
  virtual bool should_concurrent_collect() const;
  virtual bool is_too_full() const;
  void collect(bool   full,
               bool   clear_all_soft_refs,
               size_t size,
               bool   tlab);

  HeapWord* expand_and_allocate(size_t word_size,
                                bool tlab,
                                bool parallel = false);

  // GC prologue and epilogue
  void gc_prologue(bool full);
  void gc_prologue_work(bool full, bool registerClosure,
                        ModUnionClosure* modUnionClosure);
  void gc_epilogue(bool full);
  void gc_epilogue_work(bool full);

  // Time since last GC of this generation
  jlong time_of_last_gc(jlong now) {
    return collector()->time_of_last_gc(now);
  }
  void update_time_of_last_gc(jlong now) {
    collector()->update_time_of_last_gc(now);
  }

  // Allocation failure
  void expand(size_t bytes, size_t expand_bytes,
    CMSExpansionCause::Cause cause);
  virtual bool expand(size_t bytes, size_t expand_bytes);
  void shrink(size_t bytes);
  HeapWord* expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz);
  bool expand_and_ensure_spooling_space(PromotionInfo* promo);

  // Iteration support and related enquiries
  void save_marks();
  bool no_allocs_since_save_marks();
  void object_iterate_since_last_GC(ObjectClosure* cl);
  void younger_refs_iterate(OopsInGenClosure* cl);

  // Iteration support specific to CMS generations
  void save_sweep_limit();

  // More iteration support
  virtual void oop_iterate(MemRegion mr, OopClosure* cl);
  virtual void oop_iterate(OopClosure* cl);
  virtual void safe_object_iterate(ObjectClosure* cl);
  virtual void object_iterate(ObjectClosure* cl);

  // Need to declare the full complement of closures, whether we'll
  // override them or not, or get message from the compiler:
  //   oop_since_save_marks_iterate_nv hides virtual function...
  #define CMS_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix) \
    void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl);
  ALL_SINCE_SAVE_MARKS_CLOSURES(CMS_SINCE_SAVE_MARKS_DECL)

  // Smart allocation  XXX -- move to CFLSpace?
  void setNearLargestChunk();
  bool isNearLargestChunk(HeapWord* addr);

  // Get the chunk at the end of the space.  Delegates to
  // the space.
  FreeChunk* find_chunk_at_end();

  // Overriding of unused functionality (sharing not yet supported with CMS)
  void pre_adjust_pointers();
  void post_compact();

  // Debugging
  void prepare_for_verify();
  void verify(bool allow_dirty);
  void print_statistics()               PRODUCT_RETURN;

  // Performance Counters support
  virtual void update_counters();
  virtual void update_counters(size_t used);
  void initialize_performance_counters();
  CollectorCounters* counters()  { return collector()->counters(); }

  // Support for parallel remark of survivor space
  void* get_data_recorder(int thr_num) {
    // Delegate to the collector
    return collector()->get_data_recorder(thr_num);
  }

  // Printing
  const char* name() const;
  virtual const char* short_name() const { return "CMS"; }
  void        print() const;
  void printOccupancy(const char* s);
  bool must_be_youngest() const { return false; }
  bool must_be_oldest()   const { return true; }

  void compute_new_size();

  CollectionTypes debug_collection_type() { return _debug_collection_type; }
  void rotate_debug_collection_type();
};

class ASConcurrentMarkSweepGeneration : public ConcurrentMarkSweepGeneration {

  // Return the size policy from the heap's collector
  // policy casted to CMSAdaptiveSizePolicy*.
  CMSAdaptiveSizePolicy* cms_size_policy() const;

  // Resize the generation based on the adaptive size
  // policy.
  void resize(size_t cur_promo, size_t desired_promo);

  // Return the GC counters from the collector policy
  CMSGCAdaptivePolicyCounters* gc_adaptive_policy_counters();

  virtual void shrink_by(size_t bytes);

 public:
  virtual void compute_new_size();
  ASConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size,
                                  int level, CardTableRS* ct,
                                  bool use_adaptive_freelists,
                                  FreeBlockDictionary::DictionaryChoice
                                    dictionaryChoice) :
    ConcurrentMarkSweepGeneration(rs, initial_byte_size, level, ct,
      use_adaptive_freelists, dictionaryChoice) {}

  virtual const char* short_name() const { return "ASCMS"; }
  virtual Generation::Name kind() { return Generation::ASConcurrentMarkSweep; }

  virtual void update_counters();
  virtual void update_counters(size_t used);
};

//
// Closures of various sorts used by CMS to accomplish its work
//

// This closure is used to check that a certain set of oops is empty.
class FalseClosure: public OopClosure {
 public:
  void do_oop(oop* p)       { guarantee(false, "Should be an empty set"); }
  void do_oop(narrowOop* p) { guarantee(false, "Should be an empty set"); }
};

// This closure is used to do concurrent marking from the roots
// following the first checkpoint.
class MarkFromRootsClosure: public BitMapClosure {
  CMSCollector*  _collector;
  MemRegion      _span;
  CMSBitMap*     _bitMap;
  CMSBitMap*     _mut;
  CMSMarkStack*  _markStack;
  CMSMarkStack*  _revisitStack;
  bool           _yield;
  int            _skipBits;
  HeapWord*      _finger;
  HeapWord*      _threshold;
  DEBUG_ONLY(bool _verifying;)

 public:
  MarkFromRootsClosure(CMSCollector* collector, MemRegion span,
                       CMSBitMap* bitMap,
                       CMSMarkStack*  markStack,
                       CMSMarkStack*  revisitStack,
                       bool should_yield, bool verifying = false);
  bool do_bit(size_t offset);
  void reset(HeapWord* addr);
  inline void do_yield_check();

 private:
  void scanOopsInOop(HeapWord* ptr);
  void do_yield_work();
};

// This closure is used to do concurrent multi-threaded
// marking from the roots following the first checkpoint.
// XXX This should really be a subclass of the serial version
// above, but I have not had the time to refactor things cleanly.
// That will be done for Dolphin.
1358 class Par_MarkFromRootsClosure: public BitMapClosure {
1359   CMSCollector*  _collector;
1360   MemRegion      _whole_span;
1361   MemRegion      _span;
1362   CMSBitMap*     _bit_map;
1363   CMSBitMap*     _mut;
1364   OopTaskQueue*  _work_queue;
1365   CMSMarkStack*  _overflow_stack;
1366   CMSMarkStack*  _revisit_stack;
1367   bool           _yield;
1368   int            _skip_bits;
1369   HeapWord*      _finger;
1370   HeapWord*      _threshold;
1371   CMSConcMarkingTask* _task;
1372  public:
1373   Par_MarkFromRootsClosure(CMSConcMarkingTask* task, CMSCollector* collector,
1374                        MemRegion span,
1375                        CMSBitMap* bit_map,
1376                        OopTaskQueue* work_queue,
1377                        CMSMarkStack*  overflow_stack,
1378                        CMSMarkStack*  revisit_stack,
1379                        bool should_yield);
1380   bool do_bit(size_t offset);
1381   inline void do_yield_check();
1382 
1383  private:
1384   void scan_oops_in_oop(HeapWord* ptr);
1385   void do_yield_work();
1386   bool get_work_from_overflow_stack();
1387 };
1388 
1389 // The following closures are used to do certain kinds of verification of
1390 // CMS marking.
1391 class PushAndMarkVerifyClosure: public OopClosure {
1392   CMSCollector*    _collector;
1393   MemRegion        _span;
1394   CMSBitMap*       _verification_bm;
1395   CMSBitMap*       _cms_bm;
1396   CMSMarkStack*    _mark_stack;
1397  protected:
1398   void do_oop(oop p);
1399   template <class T> inline void do_oop_work(T* p) {
1400     oop obj = oopDesc::load_decode_heap_oop_not_null(p);
1401     do_oop(obj);
1402   }
1403  public:
1404   PushAndMarkVerifyClosure(CMSCollector* cms_collector,
1405                            MemRegion span,
1406                            CMSBitMap* verification_bm,
1407                            CMSBitMap* cms_bm,
1408                            CMSMarkStack*  mark_stack);
1409   void do_oop(oop* p);
1410   void do_oop(narrowOop* p);
1411   // Deal with a stack overflow condition
1412   void handle_stack_overflow(HeapWord* lost);
1413 };
1414 
1415 class MarkFromRootsVerifyClosure: public BitMapClosure {
1416   CMSCollector*  _collector;
1417   MemRegion      _span;
1418   CMSBitMap*     _verification_bm;
1419   CMSBitMap*     _cms_bm;
1420   CMSMarkStack*  _mark_stack;
1421   HeapWord*      _finger;
1422   PushAndMarkVerifyClosure _pam_verify_closure;
1423  public:
1424   MarkFromRootsVerifyClosure(CMSCollector* collector, MemRegion span,
1425                              CMSBitMap* verification_bm,
1426                              CMSBitMap* cms_bm,
1427                              CMSMarkStack*  mark_stack);
1428   bool do_bit(size_t offset);
1429   void reset(HeapWord* addr);
1430 };
1431 
1432 
1433 // This closure is used to check that a certain set of bits is
1434 // "empty" (i.e. the bit vector doesn't have any 1-bits).
1435 class FalseBitMapClosure: public BitMapClosure {
1436  public:
1437   bool do_bit(size_t offset) {
1438     guarantee(false, "Should not have a 1 bit");
1439     return true;
1440   }
1441 };
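// Editorial sketch: asserting that a bit map (for instance the mod union
// table after it has been fully flushed) is clear. The iterate() entry
// point on CMSBitMap is assumed.
//
//   FalseBitMapClosure no_bits;
//   mod_union_table->iterate(&no_bits);  // any 1-bit trips the guarantee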
1442 
1443 // This closure is used during the second checkpointing phase
1444 // to rescan the marked objects on the dirty cards in the mod
1445 // union table and the card table proper. It's invoked via
1446 // MarkFromDirtyCardsClosure below. It uses either
1447 // [Par_]MarkRefsIntoAndScanClosure (Par_ in the parallel case)
1448 // declared in genOopClosures.hpp to accomplish some of its work.
1449 // In the parallel case the bit map is shared, so updates to
1450 // it by embedded closures need to be suitably synchronized;
1451 // this closure itself, however, only reads the bit map and,
1452 // because the operation is idempotent, it is immune to
1453 // reading stale values.
1454 class ScanMarkedObjectsAgainClosure: public UpwardsObjectClosure {
1455   #ifdef ASSERT
1456     CMSCollector*          _collector;
1457     MemRegion              _span;
1458     union {
1459       CMSMarkStack*        _mark_stack;
1460       OopTaskQueue*        _work_queue;
1461     };
1462   #endif // ASSERT
1463   bool                       _parallel;
1464   CMSBitMap*                 _bit_map;
1465   union {
1466     MarkRefsIntoAndScanClosure*     _scan_closure;
1467     Par_MarkRefsIntoAndScanClosure* _par_scan_closure;
1468   };
1469 
1470  public:
1471   ScanMarkedObjectsAgainClosure(CMSCollector* collector,
1472                                 MemRegion span,
1473                                 ReferenceProcessor* rp,
1474                                 CMSBitMap* bit_map,
1475                                 CMSMarkStack*  mark_stack,
1476                                 CMSMarkStack*  revisit_stack,
1477                                 MarkRefsIntoAndScanClosure* cl):
1478     #ifdef ASSERT
1479       _collector(collector),
1480       _span(span),
1481       _mark_stack(mark_stack),
1482     #endif // ASSERT
1483     _parallel(false),
1484     _bit_map(bit_map),
1485     _scan_closure(cl) { }
1486 
1487   ScanMarkedObjectsAgainClosure(CMSCollector* collector,
1488                                 MemRegion span,
1489                                 ReferenceProcessor* rp,
1490                                 CMSBitMap* bit_map,
1491                                 OopTaskQueue* work_queue,
1492                                 CMSMarkStack* revisit_stack,
1493                                 Par_MarkRefsIntoAndScanClosure* cl):
1494     #ifdef ASSERT
1495       _collector(collector),
1496       _span(span),
1497       _work_queue(work_queue),
1498     #endif // ASSERT
1499     _parallel(true),
1500     _bit_map(bit_map),
1501     _par_scan_closure(cl) { }
1502 
1503   void do_object(oop obj) {
1504     guarantee(false, "Call the do_object_bm(oop, MemRegion) form instead");
1505   }
1506   bool do_object_b(oop obj) {
1507     guarantee(false, "Call the do_object_bm(oop, MemRegion) form instead");
1508     return false;
1509   }
1510   bool do_object_bm(oop p, MemRegion mr);
1511 };
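// Editorial sketch of the dispatch do_object_bm() presumably performs; the
// real body is in the .cpp file and this is only an assumption about its
// shape. oopDesc::oop_iterate(closure, mr) restricts the scan to the oops
// within mr.
//
//   bool ScanMarkedObjectsAgainClosure::do_object_bm(oop p, MemRegion mr) {
//     if (_parallel) {
//       p->oop_iterate(_par_scan_closure, mr);  // synchronized marking
//     } else {
//       p->oop_iterate(_scan_closure, mr);
//     }
//     return true;
//   }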
1512 
1513 // This closure is used during the second checkpointing phase
1514 // to rescan the marked objects on the dirty cards in the mod
1515 // union table and the card table proper. It invokes
1516 // ScanMarkedObjectsAgainClosure above to accomplish much of its work.
1517 // In the parallel case, the bit map is shared and requires
1518 // synchronized access.
1519 class MarkFromDirtyCardsClosure: public MemRegionClosure {
1520   CompactibleFreeListSpace*      _space;
1521   ScanMarkedObjectsAgainClosure  _scan_cl;
1522   size_t                         _num_dirty_cards;
1523 
1524  public:
1525   MarkFromDirtyCardsClosure(CMSCollector* collector,
1526                             MemRegion span,
1527                             CompactibleFreeListSpace* space,
1528                             CMSBitMap* bit_map,
1529                             CMSMarkStack* mark_stack,
1530                             CMSMarkStack* revisit_stack,
1531                             MarkRefsIntoAndScanClosure* cl):
1532     _space(space),
1533     _num_dirty_cards(0),
1534     _scan_cl(collector, span, collector->ref_processor(), bit_map,
1535              mark_stack, revisit_stack, cl) { }
1536 
1537   MarkFromDirtyCardsClosure(CMSCollector* collector,
1538                             MemRegion span,
1539                             CompactibleFreeListSpace* space,
1540                             CMSBitMap* bit_map,
1541                             OopTaskQueue* work_queue,
1542                             CMSMarkStack* revisit_stack,
1543                             Par_MarkRefsIntoAndScanClosure* cl):
1544     _space(space),
1545     _num_dirty_cards(0),
1546     _scan_cl(collector, span, collector->ref_processor(), bit_map,
1547              work_queue, revisit_stack, cl) { }
1548 
1549   void do_MemRegion(MemRegion mr);
1550   void set_space(CompactibleFreeListSpace* space) { _space = space; }
1551   size_t num_dirty_cards() { return _num_dirty_cards; }
1552 };
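// Editorial usage sketch: the remark code applies this closure to each
// stretch of dirty cards it finds; obtaining those regions is elided and
// `dirty_region` is a hypothetical placeholder.
//
//   MarkFromDirtyCardsClosure dirty_cl(collector, span, cms_space, bit_map,
//                                      mark_stack, revisit_stack, &mris_cl);
//   dirty_cl.do_MemRegion(dirty_region);    // rescan one dirty stretch
//   size_t n = dirty_cl.num_dirty_cards();  // statistics for logging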
1553 
1554 // This closure is used in the non-product build to check
1555 // that there are no MemRegions with a certain property.
1556 class FalseMemRegionClosure: public MemRegionClosure {
1557   void do_MemRegion(MemRegion mr) {
1558     guarantee(!mr.is_empty(), "Shouldn't be empty");
1559     guarantee(false, "Should never be here");
1560   }
1561 };
1562 
1563 // This closure is used during the precleaning phase
1564 // to "carefully" rescan marked objects on dirty cards.
1565 // It uses MarkRefsIntoAndScanClosure declared in genOopClosures.hpp
1566 // to accomplish some of its work.
1567 class ScanMarkedObjectsAgainCarefullyClosure: public ObjectClosureCareful {
1568   CMSCollector*                  _collector;
1569   MemRegion                      _span;
1570   bool                           _yield;
1571   Mutex*                         _freelistLock;
1572   CMSBitMap*                     _bitMap;
1573   CMSMarkStack*                  _markStack;
1574   MarkRefsIntoAndScanClosure*    _scanningClosure;
1575 
1576  public:
1577   ScanMarkedObjectsAgainCarefullyClosure(CMSCollector* collector,
1578                                          MemRegion     span,
1579                                          CMSBitMap* bitMap,
1580                                          CMSMarkStack*  markStack,
1581                                          CMSMarkStack*  revisitStack,
1582                                          MarkRefsIntoAndScanClosure* cl,
1583                                          bool should_yield):
1584     _collector(collector),
1585     _span(span),
1586     _yield(should_yield),
1587     _bitMap(bitMap),
1588     _markStack(markStack),
1589     _scanningClosure(cl) {
1590   }
1591 
1592   void do_object(oop p) {
1593     guarantee(false, "call do_object_careful_m instead");
1594   }
1595 
1596   size_t      do_object_careful(oop p) {
1597     guarantee(false, "Unexpected caller");
1598     return 0;
1599   }
1600 
1601   size_t      do_object_careful_m(oop p, MemRegion mr);
1602 
1603   void setFreelistLock(Mutex* m) {
1604     _freelistLock = m;
1605     _scanningClosure->set_freelistLock(m);
1606   }
1607 
1608  private:
1609   inline bool do_yield_check();
1610 
1611   void do_yield_work();
1612 };
1613 
1614 class SurvivorSpacePrecleanClosure: public ObjectClosureCareful {
1615   CMSCollector*                  _collector;
1616   MemRegion                      _span;
1617   bool                           _yield;
1618   CMSBitMap*                     _bit_map;
1619   CMSMarkStack*                  _mark_stack;
1620   PushAndMarkClosure*            _scanning_closure;
1621   unsigned int                   _before_count;
1622 
1623  public:
1624   SurvivorSpacePrecleanClosure(CMSCollector* collector,
1625                                MemRegion     span,
1626                                CMSBitMap*    bit_map,
1627                                CMSMarkStack* mark_stack,
1628                                PushAndMarkClosure* cl,
1629                                unsigned int  before_count,
1630                                bool          should_yield):
1631     _collector(collector),
1632     _span(span),
1633     _yield(should_yield),
1634     _bit_map(bit_map),
1635     _mark_stack(mark_stack),
1636     _scanning_closure(cl),
1637     _before_count(before_count)
1638   { }
1639 
1640   void do_object(oop p) {
1641     guarantee(false, "call do_object_careful instead");
1642   }
1643 
1644   size_t      do_object_careful(oop p);
1645 
1646   size_t      do_object_careful_m(oop p, MemRegion mr) {
1647     guarantee(false, "Unexpected caller");
1648     return 0;
1649   }
1650 
1651  private:
1652   inline void do_yield_check();
1653   void do_yield_work();
1654 };
1655 
1656 // This closure is used to accomplish the sweeping work
1657 // after the second checkpoint but before the concurrent reset
1658 // phase.
1659 //
1660 // Terminology
1661 //   left hand chunk (LHC) - block of one or more chunks currently being
1662 //     coalesced.  The LHC is available for coalescing with a new chunk.
1663 //   right hand chunk (RHC) - block that is currently being swept and is
1664 //     free or garbage, so it can be coalesced with the LHC.
1665 // _inFreeRange is true if there is currently a LHC
1666 // _lastFreeRangeCoalesced is true if the LHC consists of more than one chunk.
1667 // _freeRangeInFreeLists is true if the LHC is in the free lists.
1668 // _freeFinger is the address of the current LHC
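// A worked picture of one coalescing step (editorial illustration):
//
//      ... | LHC (free range being grown) | RHC (dead) | live object | ...
//           ^ _freeFinger
//
// The sweep reaches the RHC and finds it is garbage, so it coalesces the
// RHC into the LHC: the accumulated range size grows by the RHC's size and
// _lastFreeRangeCoalesced becomes true, while _freeFinger stays put. On
// reaching the live object, the finished LHC is returned to the free lists
// and _inFreeRange is cleared.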
1669 class SweepClosure: public BlkClosureCareful {
1670   CMSCollector*                  _collector;  // collector doing the work
1671   ConcurrentMarkSweepGeneration* _g;    // Generation being swept
1672   CompactibleFreeListSpace*      _sp;   // Space being swept
1673   HeapWord*                      _limit;  // Address at which the sweep stops
1674   Mutex*                         _freelistLock; // Free list lock (in space)
1675   CMSBitMap*                     _bitMap;       // Marking bit map (in
1676                                                 // generation)
1677   bool                           _inFreeRange;  // Indicates if we are in the
1678                                                 // midst of a free run
1679   bool                           _freeRangeInFreeLists;
1680                                         // Often, we have just found
1681                                         // a free chunk and started
1682                                         // a new free range; we do not
1683                                         // eagerly remove this chunk from
1684                                         // the free lists unless there is
1685                                         // a possibility of coalescing.
1686                                         // When true, this flag indicates
1687                                         // that the _freeFinger below
1688                                         // points to a potentially free chunk
1689                                         // that may still be in the free lists
1690   bool                           _lastFreeRangeCoalesced;
1691                                         // free range contains chunks
1692                                         // coalesced
1693   bool                           _yield;
1694                                         // Whether sweeping should be
1695                                         // done with yields. For instance,
1696                                         // when done by the foreground
1697                                         // collector, we shouldn't yield.
1698   HeapWord*                      _freeFinger;   // When _inFreeRange is set, the
1699                                                 // pointer to the "left hand
1700                                                 // chunk"
1701   size_t                         _freeRangeSize;
1702                                         // When _inFreeRange is set, this
1703                                         // indicates the accumulated size
1704                                         // of the "left hand chunk"
1705   NOT_PRODUCT(
1706     size_t                       _numObjectsFreed;
1707     size_t                       _numWordsFreed;
1708     size_t                       _numObjectsLive;
1709     size_t                       _numWordsLive;
1710     size_t                       _numObjectsAlreadyFree;
1711     size_t                       _numWordsAlreadyFree;
1712     FreeChunk*                   _last_fc;
1713   )
1714  private:
1715   // Code that is common to a free chunk or garbage when
1716   // encountered during sweeping.
1717   void doPostIsFreeOrGarbageChunk(FreeChunk *fc,
1718                                   size_t chunkSize);
1719   // Process a free chunk during sweeping.
1720   void doAlreadyFreeChunk(FreeChunk *fc);
1721   // Process a garbage chunk during sweeping.
1722   size_t doGarbageChunk(FreeChunk *fc);
1723   // Process a live chunk during sweeping.
1724   size_t doLiveChunk(FreeChunk* fc);
1725 
1726   // Accessors.
1727   HeapWord* freeFinger() const          { return _freeFinger; }
1728   void set_freeFinger(HeapWord* v)      { _freeFinger = v; }
1729   size_t freeRangeSize() const          { return _freeRangeSize; }
1730   void set_freeRangeSize(size_t v)      { _freeRangeSize = v; }
1731   bool inFreeRange()    const           { return _inFreeRange; }
1732   void set_inFreeRange(bool v)          { _inFreeRange = v; }
1733   bool lastFreeRangeCoalesced() const    { return _lastFreeRangeCoalesced; }
1734   void set_lastFreeRangeCoalesced(bool v) { _lastFreeRangeCoalesced = v; }
1735   bool freeRangeInFreeLists() const     { return _freeRangeInFreeLists; }
1736   void set_freeRangeInFreeLists(bool v) { _freeRangeInFreeLists = v; }
1737 
1738   // Initialize a free range.
1739   void initialize_free_range(HeapWord* freeFinger, bool freeRangeInFreeLists);
1740   // Return this chunk to the free lists.
1741   void flushCurFreeChunk(HeapWord* chunk, size_t size);
1742 
1743   // Check if we should yield and do so when necessary.
1744   inline void do_yield_check(HeapWord* addr);
1745 
1746   // Yield
1747   void do_yield_work(HeapWord* addr);
1748 
1749   // Debugging/Printing
1750   void record_free_block_coalesced(FreeChunk* fc) const PRODUCT_RETURN;
1751 
1752  public:
1753   SweepClosure(CMSCollector* collector, ConcurrentMarkSweepGeneration* g,
1754                CMSBitMap* bitMap, bool should_yield);
1755   ~SweepClosure();
1756 
1757   size_t       do_blk_careful(HeapWord* addr);
1758 };
1759 
1760 // Closures related to weak references processing
1761 
1762 // During CMS' weak reference processing, this is a
1763 // work-routine/closure used to complete transitive
1764 // marking of objects as live after a point at which an
1765 // initial set has been completely accumulated.
1766 // This closure is currently used both during the final
1767 // remark stop-world phase, as well as during the concurrent
1768 // precleaning of the discovered reference lists.
1769 class CMSDrainMarkingStackClosure: public VoidClosure {
1770   CMSCollector*        _collector;
1771   MemRegion            _span;
1772   CMSMarkStack*        _mark_stack;
1773   CMSBitMap*           _bit_map;
1774   CMSKeepAliveClosure* _keep_alive;
1775   bool                 _concurrent_precleaning;
1776  public:
1777   CMSDrainMarkingStackClosure(CMSCollector* collector, MemRegion span,
1778                       CMSBitMap* bit_map, CMSMarkStack* mark_stack,
1779                       CMSKeepAliveClosure* keep_alive,
1780                       bool cpc):
1781     _collector(collector),
1782     _span(span),
1783     _bit_map(bit_map),
1784     _mark_stack(mark_stack),
1785     _keep_alive(keep_alive),
1786     _concurrent_precleaning(cpc) {
1787     assert(_concurrent_precleaning == _keep_alive->concurrent_precleaning(),
1788            "Mismatch");
1789   }
1790 
1791   void do_void();
1792 };
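// Editorial sketch of the drain protocol do_void() implements; this is an
// assumption about its shape, not a copy of the .cpp body. CMSMarkStack's
// isEmpty() and pop() are assumed to exist as the obvious stack operations.
//
//   void CMSDrainMarkingStackClosure::do_void() {
//     while (!_mark_stack->isEmpty()) {
//       oop obj = _mark_stack->pop();
//       obj->oop_iterate(_keep_alive);  // keep referents transitively alive
//     }
//   }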
1793 
1794 // A parallel version of CMSDrainMarkingStackClosure above.
1795 class CMSParDrainMarkingStackClosure: public VoidClosure {
1796   CMSCollector*           _collector;
1797   MemRegion               _span;
1798   OopTaskQueue*           _work_queue;
1799   CMSBitMap*              _bit_map;
1800   CMSInnerParMarkAndPushClosure _mark_and_push;
1801 
1802  public:
1803   CMSParDrainMarkingStackClosure(CMSCollector* collector,
1804                                  MemRegion span, CMSBitMap* bit_map,
1805                                  CMSMarkStack* revisit_stack,
1806                                  OopTaskQueue* work_queue):
1807     _collector(collector),
1808     _span(span),
1809     _bit_map(bit_map),
1810     _work_queue(work_queue),
1811     _mark_and_push(collector, span, bit_map, revisit_stack, work_queue) { }
1812 
1813  public:
1814   void trim_queue(uint max);
1815   void do_void();
1816 };
1817 
1818 // Allow yielding or short-circuiting of reference list
1819 // precleaning work.
1820 class CMSPrecleanRefsYieldClosure: public YieldClosure {
1821   CMSCollector* _collector;
1822   void do_yield_work();
1823  public:
1824   CMSPrecleanRefsYieldClosure(CMSCollector* collector):
1825     _collector(collector) {}
1826   virtual bool should_return();
1827 };
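// Editorial sketch: reference precleaning polls the yield closure between
// units of work and bails out when asked; the loop condition and work unit
// below are hypothetical.
//
//   CMSPrecleanRefsYieldClosure yield_cl(collector);
//   while (more_refs_to_preclean()) {
//     preclean_next_batch();
//     if (yield_cl.should_return()) break;  // yield or short-circuit
//   }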
1828 
1829 
1830 // Convenience class that locks free list locks for given CMS collector
1831 class FreelistLocker: public StackObj {
1832  private:
1833   CMSCollector* _collector;
1834  public:
1835   FreelistLocker(CMSCollector* collector):
1836     _collector(collector) {
1837     _collector->getFreelistLocks();
1838   }
1839 
1840   ~FreelistLocker() {
1841     _collector->releaseFreelistLocks();
1842   }
1843 };
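// Editorial usage sketch: as a StackObj, the locker scopes the free list
// locks to the enclosing block.
//
//   {
//     FreelistLocker fl(collector);  // takes getFreelistLocks()
//     /* manipulate the free lists safely here */
//   }                                // releaseFreelistLocks() on scope exit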
1844 
1845 // Mark all dead objects in a given space.
1846 class MarkDeadObjectsClosure: public BlkClosure {
1847   const CMSCollector*             _collector;
1848   const CompactibleFreeListSpace* _sp;
1849   CMSBitMap*                      _live_bit_map;
1850   CMSBitMap*                      _dead_bit_map;
1851 public:
1852   MarkDeadObjectsClosure(const CMSCollector* collector,
1853                          const CompactibleFreeListSpace* sp,
1854                          CMSBitMap *live_bit_map,
1855                          CMSBitMap *dead_bit_map) :
1856     _collector(collector),
1857     _sp(sp),
1858     _live_bit_map(live_bit_map),
1859     _dead_bit_map(dead_bit_map) {}
1860   size_t do_blk(HeapWord* addr);
1861 };
1862 
1863 class TraceCMSMemoryManagerStats : public TraceMemoryManagerStats {
1864 
1865  public:
1866   TraceCMSMemoryManagerStats(CMSCollector::CollectorState phase);
1867   TraceCMSMemoryManagerStats();
1868 };
1869