/*
 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

// ConcurrentMarkSweepGeneration is in support of a concurrent
// mark-sweep old generation in the Detlefs-Printezis--Boehm-Demers-Shenker
// style. We assume, for now, that this generation is always the
// seniormost generation (modulo the PermGeneration), and for simplicity
// in the first implementation, that this generation is a single compactible
// space. Neither of these restrictions appears essential; both will be
// relaxed in the future when more time is available to implement the
// greater generality (and there's a need for it).
//
// Concurrent mode failures are currently handled by
// means of a sliding mark-compact.

class CMSAdaptiveSizePolicy;
class CMSConcMarkingTask;
class CMSGCAdaptivePolicyCounters;
class ConcurrentMarkSweepGeneration;
class ConcurrentMarkSweepPolicy;
class ConcurrentMarkSweepThread;
class CompactibleFreeListSpace;
class FreeChunk;
class PromotionInfo;
class ScanMarkedObjectsAgainCarefullyClosure;

// A generic CMS bit map. It's the basis for both the CMS marking bit map
// as well as for the mod union table (in each case only a subset of the
// methods are used). This is essentially a wrapper around the BitMap class,
// with one bit per (1<<_shifter) HeapWords. (i.e. for the marking bit map,
// we have _shifter == 0, and for the mod union table we have
// _shifter == CardTableModRefBS::card_shift - LogHeapWordSize.)
// XXX 64-bit issues in BitMap?
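//
// A worked example of the bit <-> address arithmetic (illustrative only,
// assuming a 64-bit VM where LogHeapWordSize == 3 and 512-byte cards,
// i.e. CardTableModRefBS::card_shift == 9):
//   marking bit map: _shifter == 0, so bit i covers the single HeapWord
//                    at _bmStartWord + i;
//   mod union table: _shifter == 9 - 3 == 6, so bit i covers the 64
//                    HeapWords (one card) starting at _bmStartWord + (i << 6).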
class CMSBitMap VALUE_OBJ_CLASS_SPEC {
  friend class VMStructs;

  HeapWord* _bmStartWord;   // base address of range covered by map
  size_t    _bmWordSize;    // map size (in #HeapWords covered)
  const int _shifter;       // shifts to convert HeapWord to bit position
  VirtualSpace _virtual_space; // underlying the bit map
  BitMap    _bm;            // the bit map itself
 public:
  Mutex* const _lock;       // mutex protecting _bm

 public:
  // constructor
  CMSBitMap(int shifter, int mutex_rank, const char* mutex_name);

  // allocates the actual storage for the map
  bool allocate(MemRegion mr);
  // field getter
  Mutex* lock() const { return _lock; }
  // locking verifier convenience function
  void assert_locked() const PRODUCT_RETURN;

  // inquiries
  HeapWord* startWord()   const { return _bmStartWord; }
  size_t    sizeInWords() const { return _bmWordSize;  }
  size_t    sizeInBits()  const { return _bm.size();   }
  // the following is one past the last word in space
  HeapWord* endWord()     const { return _bmStartWord + _bmWordSize; }

  // reading marks
  bool isMarked(HeapWord* addr) const;
  bool par_isMarked(HeapWord* addr) const; // does not perform lock checks
  bool isUnmarked(HeapWord* addr) const;
  bool isAllClear() const;

  // writing marks
  void mark(HeapWord* addr);
  // For marking by parallel GC threads;
  // returns true if we did, false if another thread did
  bool par_mark(HeapWord* addr);

  void mark_range(MemRegion mr);
  void par_mark_range(MemRegion mr);
  void mark_large_range(MemRegion mr);
  void par_mark_large_range(MemRegion mr);
  void par_clear(HeapWord* addr); // For unmarking by parallel GC threads.
  void clear_range(MemRegion mr);
  void par_clear_range(MemRegion mr);
  void clear_large_range(MemRegion mr);
  void par_clear_large_range(MemRegion mr);
  void clear_all();
  void clear_all_incrementally();  // Not yet implemented!!

  NOT_PRODUCT(
    // checks the memory region for validity
    void region_invariant(MemRegion mr);
  )

  // iteration
  void iterate(BitMapClosure* cl) {
    _bm.iterate(cl);
  }
  void iterate(BitMapClosure* cl, HeapWord* left, HeapWord* right);
  void dirty_range_iterate_clear(MemRegionClosure* cl);
  void dirty_range_iterate_clear(MemRegion mr, MemRegionClosure* cl);

  // auxiliary support for iteration
  HeapWord* getNextMarkedWordAddress(HeapWord* addr) const;
  HeapWord* getNextMarkedWordAddress(HeapWord* start_addr,
                                     HeapWord* end_addr) const;
  HeapWord* getNextUnmarkedWordAddress(HeapWord* addr) const;
  HeapWord* getNextUnmarkedWordAddress(HeapWord* start_addr,
                                       HeapWord* end_addr) const;
  MemRegion getAndClearMarkedRegion(HeapWord* addr);
  MemRegion getAndClearMarkedRegion(HeapWord* start_addr,
                                    HeapWord* end_addr);

  // conversion utilities
  HeapWord* offsetToHeapWord(size_t offset) const;
  size_t    heapWordToOffset(HeapWord* addr) const;
  size_t    heapWordDiffToOffsetDiff(size_t diff) const;

  // debugging
  // is this address range covered by the bit-map?
  NOT_PRODUCT(
    bool covers(MemRegion mr) const;
    bool covers(HeapWord* start, size_t size = 0) const;
  )
  void verifyNoOneBitsInRange(HeapWord* left, HeapWord* right) PRODUCT_RETURN;
};
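
// Illustrative usage sketch for CMSBitMap (hypothetical names and mutex
// rank; not part of this header's contract):
//
//   CMSBitMap mark_bits(0 /* shifter */, some_rank, "CMS mark bits");
//   if (mark_bits.allocate(covered_region)) {
//     mark_bits.mark(addr);                   // serial mark
//     bool first = mark_bits.par_mark(addr);  // true only for the winning thread
//     if (mark_bits.isMarked(addr)) { /* object at addr is considered live */ }
//   }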

// Represents a marking stack used by the CMS collector.
// Ideally this should be GrowableArray<> just like MSC's marking stack(s).
class CMSMarkStack: public CHeapObj  {
  //
  friend class CMSCollector;   // to get at expansion stats further below
  //

  VirtualSpace _virtual_space;  // space for the stack
  oop*   _base;      // bottom of stack
  size_t _index;     // one more than last occupied index
  size_t _capacity;  // max #elements
  Mutex  _par_lock;  // an advisory lock used in case of parallel access
  NOT_PRODUCT(size_t _max_depth;)  // max depth plumbed during run

 protected:
  size_t _hit_limit;      // we hit max stack size limit
  size_t _failed_double;  // we failed expansion before hitting limit

 public:
  CMSMarkStack():
    _par_lock(Mutex::event, "CMSMarkStack._par_lock", true),
    _hit_limit(0),
    _failed_double(0) {}

  bool allocate(size_t size);

  size_t capacity() const { return _capacity; }

  oop pop() {
    if (!isEmpty()) {
      return _base[--_index];
    }
    return NULL;
  }

  bool push(oop ptr) {
    if (isFull()) {
      return false;
    } else {
      _base[_index++] = ptr;
      NOT_PRODUCT(_max_depth = MAX2(_max_depth, _index));
      return true;
    }
  }

  bool isEmpty() const { return _index == 0; }
  bool isFull()  const {
    assert(_index <= _capacity, "buffer overflow");
    return _index == _capacity;
  }

  size_t length() { return _index; }

  // "Parallel versions" of some of the above
  oop par_pop() {
    // lock and pop
    MutexLockerEx x(&_par_lock, Mutex::_no_safepoint_check_flag);
    return pop();
  }

  bool par_push(oop ptr) {
    // lock and push
    MutexLockerEx x(&_par_lock, Mutex::_no_safepoint_check_flag);
    return push(ptr);
  }

  // Forcibly reset the stack, losing all of its contents.
  void reset() {
    _index = 0;
  }

  // Expand the stack, typically in response to an overflow condition
  void expand();

  // Compute the least valued stack element.
  oop least_value(HeapWord* low) {
    oop least = (oop)low;
    for (size_t i = 0; i < _index; i++) {
      least = MIN2(least, _base[i]);
    }
    return least;
  }

  // Exposed here to allow stack expansion in the parallel case
  Mutex* par_lock() { return &_par_lock; }
};
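
// Illustrative usage sketch for CMSMarkStack (hypothetical capacity and
// driver code; the collector's actual tracing loops live in the .cpp file):
//
//   CMSMarkStack stack;
//   if (!stack.allocate(capacity)) { /* handle allocation failure */ }
//   if (!stack.push(obj)) {
//     // stack full: record the overflow and arrange to expand() later
//   }
//   for (oop o = stack.pop(); o != NULL; o = stack.pop()) {
//     // ... trace o ...
//   }
//   stack.par_push(obj);  // GC worker threads use the locking variants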

class CardTableRS;
class CMSParGCThreadState;

class ModUnionClosure: public MemRegionClosure {
 protected:
  CMSBitMap* _t;
 public:
  ModUnionClosure(CMSBitMap* t): _t(t) { }
  void do_MemRegion(MemRegion mr);
};

class ModUnionClosurePar: public ModUnionClosure {
 public:
  ModUnionClosurePar(CMSBitMap* t): ModUnionClosure(t) { }
  void do_MemRegion(MemRegion mr);
};

// Survivor Chunk Array in support of parallelization of
// Survivor Space rescan.
class ChunkArray: public CHeapObj {
  size_t _index;
  size_t _capacity;
  HeapWord** _array;   // storage for array

 public:
  ChunkArray() : _index(0), _capacity(0), _array(NULL) {}
  ChunkArray(HeapWord** a, size_t c):
    _index(0), _capacity(c), _array(a) {}

  HeapWord** array() { return _array; }
  void set_array(HeapWord** a) { _array = a; }

  size_t capacity() { return _capacity; }
  void set_capacity(size_t c) { _capacity = c; }

  size_t end() {
    assert(_index < capacity(), "_index out of bounds");
    return _index;
  }  // exclusive

  HeapWord* nth(size_t n) {
    assert(n < end(), "Out of bounds access");
    return _array[n];
  }

  void reset() {
    _index = 0;
  }

  void record_sample(HeapWord* p, size_t sz) {
    // For now we do not do anything with the size
    if (_index < _capacity) {
      _array[_index++] = p;
    }
  }
};
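
// Illustrative use of ChunkArray (a sketch; the collector records PLAB
// boundaries as survivor space fills up, then uses them to carve the
// space into chunks for parallel rescan):
//
//   ChunkArray ca(storage, max_chunks);        // storage/max_chunks assumed
//   ca.record_sample(plab_bottom, plab_words); // the size is currently unused
//   for (size_t i = 0; i < ca.end(); i++) {
//     HeapWord* boundary = ca.nth(i);
//     // ... schedule the chunk ending at boundary for rescan ...
//   }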

//
// Timing, allocation and promotion statistics for gc scheduling and incremental
// mode pacing.  Most statistics are exponential averages.
//
class CMSStats VALUE_OBJ_CLASS_SPEC {
 private:
  ConcurrentMarkSweepGeneration* const _cms_gen;   // The cms (old) gen.

  // The following are exponential averages with factor alpha (0-100):
  //   avg = ((100 - alpha) * avg + alpha * cur_sample) / 100
  //
  //   The durations measure:  end_time[n] - start_time[n]
  //   The periods measure:    start_time[n] - start_time[n-1]
  //
  // The cms period and duration include only concurrent collections; time spent
  // in foreground cms collections due to System.gc() or because of a failure to
  // keep up is not included.
  //
  // There are 3 alphas to "bootstrap" the statistics.  The _saved_alpha is the
  // real value, but is used only after the first period.  A value of 100 is
  // used for the first sample so it gets the entire weight.
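  //
  // For example (illustrative arithmetic only): with alpha == 25, a current
  // average of 4.0s and a new sample of 8.0s, the updated average is
  //   ((100 - 25) * 4.0 + 25 * 8.0) / 100 == 5.0s,
  // i.e. the new sample contributes 25% of the weight.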
  unsigned int _saved_alpha; // 0-100
  unsigned int _gc0_alpha;
  unsigned int _cms_alpha;

  double _gc0_duration;
  double _gc0_period;
  size_t _gc0_promoted;         // bytes promoted per gc0
  double _cms_duration;
  double _cms_duration_pre_sweep; // time from initiation to start of sweep
  double _cms_duration_per_mb;
  double _cms_period;
  size_t _cms_allocated;        // bytes of direct allocation per gc0 period

  // Timers.
  elapsedTimer _cms_timer;
  TimeStamp    _gc0_begin_time;
  TimeStamp    _cms_begin_time;
  TimeStamp    _cms_end_time;

  // Snapshots of the amount used in the CMS generation.
  size_t _cms_used_at_gc0_begin;
  size_t _cms_used_at_gc0_end;
  size_t _cms_used_at_cms_begin;

  // Used to prevent the duty cycle from being reduced in the middle of a cms
  // cycle.
  bool _allow_duty_cycle_reduction;

  enum {
    _GC0_VALID = 0x1,
    _CMS_VALID = 0x2,
    _ALL_VALID = _GC0_VALID | _CMS_VALID
  };

  unsigned int _valid_bits;

  unsigned int _icms_duty_cycle;        // icms duty cycle (0-100).

 protected:

  // Return a duty cycle that avoids wild oscillations, by limiting the amount
  // of change between old_duty_cycle and new_duty_cycle (the latter is treated
  // as a recommended value).
  static unsigned int icms_damped_duty_cycle(unsigned int old_duty_cycle,
                                             unsigned int new_duty_cycle);
  unsigned int icms_update_duty_cycle_impl();

  // In support of adjusting cms trigger ratios based on history
  // of concurrent mode failure.
  double cms_free_adjustment_factor(size_t free) const;
  void   adjust_cms_free_adjustment_factor(bool fail, size_t free);

 public:
  CMSStats(ConcurrentMarkSweepGeneration* cms_gen,
           unsigned int alpha = CMSExpAvgFactor);

  // Whether or not the statistics contain valid data; the higher level
  // statistics below cannot be used until this returns true (they require
  // at least one young gen and one cms cycle to have completed).
  bool valid() const;

  // Record statistics.
  void record_gc0_begin();
  void record_gc0_end(size_t cms_gen_bytes_used);
  void record_cms_begin();
  void record_cms_end();

  // Allow management of the cms timer, which must be stopped/started around
  // yield points.
  elapsedTimer& cms_timer()     { return _cms_timer; }
  void start_cms_timer()        { _cms_timer.start(); }
  void stop_cms_timer()         { _cms_timer.stop(); }

  // Basic statistics; units are seconds or bytes.
  double gc0_period() const     { return _gc0_period; }
  double gc0_duration() const   { return _gc0_duration; }
  size_t gc0_promoted() const   { return _gc0_promoted; }
  double cms_period() const          { return _cms_period; }
  double cms_duration() const        { return _cms_duration; }
  double cms_duration_per_mb() const { return _cms_duration_per_mb; }
  size_t cms_allocated() const       { return _cms_allocated; }

  size_t cms_used_at_gc0_end() const { return _cms_used_at_gc0_end; }

  // Seconds since the last background cms cycle began or ended.
  double cms_time_since_begin() const;
  double cms_time_since_end() const;

  // Higher level statistics--caller must check that valid() returns true before
  // calling.

  // Returns bytes promoted per second of wall clock time.
  double promotion_rate() const;

  // Returns bytes directly allocated per second of wall clock time.
  double cms_allocation_rate() const;

  // Rate at which space in the cms generation is being consumed (sum of the
  // above two).
  double cms_consumption_rate() const;

  // Returns an estimate of the number of seconds until the cms generation will
  // fill up, assuming no collection work is done.
  double time_until_cms_gen_full() const;

  // Returns an estimate of the number of seconds remaining until
  // the cms generation collection should start.
  double time_until_cms_start() const;

  // End of higher level statistics.

  // Returns the cms incremental mode duty cycle, as a percentage (0-100).
  unsigned int icms_duty_cycle() const { return _icms_duty_cycle; }

  // Update the duty cycle and return the new value.
  unsigned int icms_update_duty_cycle();

  // Debugging.
  void print_on(outputStream* st) const PRODUCT_RETURN;
  void print() const { print_on(gclog_or_tty); }
};
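
// Illustrative scheduling arithmetic built on CMSStats (a sketch of the
// intuition only; the real policy also applies padding and the adjustment
// factors above, and 'collector' / 'free_bytes' are assumed names):
//
//   CMSStats& s = collector->stats();
//   if (s.valid()) {
//     double consumption = s.cms_consumption_rate();  // bytes/sec
//     double until_full  = free_bytes / consumption;  // cf. time_until_cms_gen_full()
//     // Start a cycle once the time left no longer comfortably exceeds
//     // the expected cycle length, cf. time_until_cms_start().
//   }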

// A closure related to weak references processing which
// we embed in the CMSCollector, since we need to pass
// it to the reference processor for secondary filtering
// of references based on reachability of referent;
// see role of _is_alive_non_header closure in the
// ReferenceProcessor class.
// For objects in the CMS generation, this closure checks
// if the object is "live" (reachable). Used in weak
// reference processing.
class CMSIsAliveClosure: public BoolObjectClosure {
  const MemRegion  _span;
  const CMSBitMap* _bit_map;

  friend class CMSCollector;
 public:
  CMSIsAliveClosure(MemRegion span,
                    CMSBitMap* bit_map):
    _span(span),
    _bit_map(bit_map) {
    assert(!span.is_empty(), "Empty span could spell trouble");
  }

  void do_object(oop obj) {
    assert(false, "not to be invoked");
  }

  bool do_object_b(oop obj);
};
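
// Conceptually, do_object_b() treats an object as live if it lies outside
// the span or if its start bit is set in the marking bit map, roughly
// (a sketch; the definition in the .cpp file is authoritative):
//
//   bool CMSIsAliveClosure::do_object_b(oop obj) {
//     HeapWord* addr = (HeapWord*)obj;
//     return !_span.contains(addr) || _bit_map->isMarked(addr);
//   }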


// Implements AbstractRefProcTaskExecutor for CMS.
class CMSRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
public:

  CMSRefProcTaskExecutor(CMSCollector& collector)
    : _collector(collector)
  { }

  // Executes a task using worker threads.
  virtual void execute(ProcessTask& task);
  virtual void execute(EnqueueTask& task);
private:
  CMSCollector& _collector;
};


class CMSCollector: public CHeapObj {
  friend class VMStructs;
  friend class ConcurrentMarkSweepThread;
  friend class ConcurrentMarkSweepGeneration;
  friend class CompactibleFreeListSpace;
  friend class CMSParRemarkTask;
  friend class CMSConcMarkingTask;
  friend class CMSRefProcTaskProxy;
  friend class CMSRefProcTaskExecutor;
  friend class ScanMarkedObjectsAgainCarefullyClosure;  // for sampling eden
  friend class SurvivorSpacePrecleanClosure;            // --- ditto -------
  friend class PushOrMarkClosure;             // to access _restart_addr
  friend class Par_PushOrMarkClosure;         // to access _restart_addr
  friend class MarkFromRootsClosure;          //  -- ditto --
                                              // ... and for clearing cards
  friend class Par_MarkFromRootsClosure;      //  to access _restart_addr
                                              // ... and for clearing cards
  friend class Par_ConcMarkingClosure;        //  to access _restart_addr etc.
  friend class MarkFromRootsVerifyClosure;    // to access _restart_addr
  friend class PushAndMarkVerifyClosure;      //  -- ditto --
  friend class MarkRefsIntoAndScanClosure;    // to access _overflow_list
  friend class PushAndMarkClosure;            //  -- ditto --
  friend class Par_PushAndMarkClosure;        //  -- ditto --
  friend class CMSKeepAliveClosure;           //  -- ditto --
  friend class CMSDrainMarkingStackClosure;   //  -- ditto --
  friend class CMSInnerParMarkAndPushClosure; //  -- ditto --
  NOT_PRODUCT(friend class ScanMarkedObjectsAgainClosure;) //  assertion on _overflow_list
  friend class ReleaseForegroundGC;  // to access _foregroundGCShouldWait
  friend class VM_CMS_Operation;
  friend class VM_CMS_Initial_Mark;
  friend class VM_CMS_Final_Remark;

 private:
  jlong _time_of_last_gc;
  void update_time_of_last_gc(jlong now) {
    _time_of_last_gc = now;
  }

  OopTaskQueueSet* _task_queues;

  // Overflow list of grey objects, threaded through mark-word
  // Manipulated with CAS in the parallel/multi-threaded case.
  oop _overflow_list;
  // The following array-pair keeps track of mark words
  // displaced for accommodating the overflow list above.
  // This code will likely be revisited under RFE#4922830.
  GrowableArray<oop>*     _preserved_oop_stack;
  GrowableArray<markOop>* _preserved_mark_stack;
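
  // Illustrative push protocol for the overflow list (a sketch distilled
  // from the comments above; push_on_overflow_list() and
  // par_push_on_overflow_list() below are authoritative):
  //   1. preserve p's mark word (via the stacks above) if it would
  //      otherwise be lost;
  //   2. thread the list through p by storing the current head into
  //      p's mark word;
  //   3. single-threaded: _overflow_list = p;
  //      multi-threaded:  CAS the new head into _overflow_list, retrying
  //      with the freshly observed head on failure.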

  int*             _hash_seed;

  // In support of multi-threaded concurrent phases
  YieldingFlexibleWorkGang* _conc_workers;

  // Performance Counters
  CollectorCounters* _gc_counters;

  // Initialization Errors
  bool _completed_initialization;

  // In support of ExplicitGCInvokesConcurrent
  static   bool _full_gc_requested;
  unsigned int  _collection_count_start;

  // Should we unload classes this concurrent cycle?
  bool _should_unload_classes;
  unsigned int  _concurrent_cycles_since_last_unload;
  unsigned int concurrent_cycles_since_last_unload() const {
    return _concurrent_cycles_since_last_unload;
  }
  // Did we (allow) unload classes in the previous concurrent cycle?
  bool unloaded_classes_last_cycle() const {
    return concurrent_cycles_since_last_unload() == 0;
  }
  // Root scanning options for perm gen
  int _roots_scanning_options;
  int roots_scanning_options() const      { return _roots_scanning_options; }
  void add_root_scanning_option(int o)    { _roots_scanning_options |= o;   }
  void remove_root_scanning_option(int o) { _roots_scanning_options &= ~o;  }

  // Verification support
  CMSBitMap     _verification_mark_bm;
  void verify_after_remark_work_1();
  void verify_after_remark_work_2();

  // true if any verification flag is on.
  bool _verifying;
  bool verifying() const { return _verifying; }
  void set_verifying(bool v) { _verifying = v; }

  // Collector policy
  ConcurrentMarkSweepPolicy* _collector_policy;
  ConcurrentMarkSweepPolicy* collector_policy() { return _collector_policy; }

  // XXX Move these to CMSStats ??? FIX ME !!!
  elapsedTimer _inter_sweep_timer;   // time between sweeps
  elapsedTimer _intra_sweep_timer;   // time _in_ sweeps
  // padded decaying average estimates of the above
  AdaptivePaddedAverage _inter_sweep_estimate;
  AdaptivePaddedAverage _intra_sweep_estimate;

 protected:
  ConcurrentMarkSweepGeneration* _cmsGen;  // old gen (CMS)
  ConcurrentMarkSweepGeneration* _permGen; // perm gen
  MemRegion                      _span;    // span covering above two
  CardTableRS*                   _ct;      // card table

  // CMS marking support structures
  CMSBitMap     _markBitMap;
  CMSBitMap     _modUnionTable;
  CMSMarkStack  _markStack;
  CMSMarkStack  _revisitStack;            // used to keep track of klassKlass objects
                                          // to revisit
  CMSBitMap     _perm_gen_verify_bit_map; // Mark bit map for perm gen verification support.

  HeapWord*     _restart_addr; // in support of marking stack overflow
  void          lower_restart_addr(HeapWord* low);

  // Counters in support of marking stack / work queue overflow handling:
  // a non-zero value indicates certain types of overflow events during
  // the current CMS cycle and could lead to stack resizing efforts at
  // an opportune future time.
  size_t        _ser_pmc_preclean_ovflw;
  size_t        _ser_pmc_remark_ovflw;
  size_t        _par_pmc_remark_ovflw;
  size_t        _ser_kac_preclean_ovflw;
  size_t        _ser_kac_ovflw;
  size_t        _par_kac_ovflw;
  NOT_PRODUCT(ssize_t _num_par_pushes;)

  // ("Weak") Reference processing support
  ReferenceProcessor*            _ref_processor;
  CMSIsAliveClosure              _is_alive_closure;
      // keep this textually after _markBitMap and _span; c'tor dependency

  ConcurrentMarkSweepThread*     _cmsThread;   // the thread doing the work
  ModUnionClosure    _modUnionClosure;
  ModUnionClosurePar _modUnionClosurePar;

  // CMS abstract state machine
  // initial_state: Idling
  // next_state(Idling)            = {Marking}
  // next_state(Marking)           = {Precleaning, Sweeping}
  // next_state(Precleaning)       = {AbortablePreclean, FinalMarking}
  // next_state(AbortablePreclean) = {FinalMarking}
  // next_state(FinalMarking)      = {Sweeping}
  // next_state(Sweeping)          = {Resizing}
  // next_state(Resizing)          = {Resetting}
  // next_state(Resetting)         = {Idling}
  // The numeric values below are chosen so that:
  // . _collectorState <= Idling ==  post-sweep && pre-mark
  // . _collectorState in (Idling, Sweeping) == {initial,final}marking ||
  //                                            precleaning || abortablePreclean
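  //
  // Putting the transitions together, an uninterrupted background cycle
  // visits the states in this order (illustrative; a concurrent mode
  // failure or an aborted preclean can cut the sequence short):
  //   Idling -> InitialMarking -> Marking -> Precleaning
  //          -> AbortablePreclean -> FinalMarking -> Sweeping
  //          -> Resizing -> Resetting -> Idling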
 public:
  enum CollectorState {
    Resizing            = 0,
    Resetting           = 1,
    Idling              = 2,
    InitialMarking      = 3,
    Marking             = 4,
    Precleaning         = 5,
    AbortablePreclean   = 6,
    FinalMarking        = 7,
    Sweeping            = 8
  };
 protected:
  static CollectorState _collectorState;

  // State related to prologue/epilogue invocation for my generations
  bool _between_prologue_and_epilogue;

  // Signalling/State related to coordination between fore- and background GC
  // Note: When the baton has been passed from background GC to foreground GC,
  // _foregroundGCIsActive is true and _foregroundGCShouldWait is false.
  static bool _foregroundGCIsActive;    // true iff foreground collector is active or
                                        // wants to go active
  static bool _foregroundGCShouldWait;  // true iff background GC is active and has not
                                        // yet passed the baton to the foreground GC

  // Support for CMSScheduleRemark (abortable preclean)
  bool _abort_preclean;
  bool _start_sampling;

  int    _numYields;
  size_t _numDirtyCards;
  size_t _sweep_count;
  // number of full gc's since the last concurrent gc.
  uint   _full_gcs_since_conc_gc;

  // occupancy used for bootstrapping stats
  double _bootstrap_occupancy;

  // timer
  elapsedTimer _timer;

  // Timing, allocation and promotion statistics, used for scheduling.
  CMSStats      _stats;

  // Allocation limits installed in the young gen, used only in
  // CMSIncrementalMode.  When an allocation in the young gen would cross one of
  // these limits, the cms generation is notified and the cms thread is started
  // or stopped, respectively.
  HeapWord*     _icms_start_limit;
  HeapWord*     _icms_stop_limit;

  enum CMS_op_type {
    CMS_op_checkpointRootsInitial,
    CMS_op_checkpointRootsFinal
  };

  void do_CMS_operation(CMS_op_type op);
  bool stop_world_and_do(CMS_op_type op);

  OopTaskQueueSet* task_queues() { return _task_queues; }
  int*             hash_seed(int i) { return &_hash_seed[i]; }
  YieldingFlexibleWorkGang* conc_workers() { return _conc_workers; }

  // Support for parallelizing Eden rescan in CMS remark phase
  void sample_eden(); // ... sample Eden space top

 private:
  // Support for parallelizing young gen rescan in CMS remark phase
  Generation* _young_gen;  // the younger gen
  HeapWord** _top_addr;    // ... Top of Eden
  HeapWord** _end_addr;    // ... End of Eden
  HeapWord** _eden_chunk_array; // ... Eden partitioning array
  size_t     _eden_chunk_index; // ... top (exclusive) of array
  size_t     _eden_chunk_capacity;  // ... max entries in array

  // Support for parallelizing survivor space rescan
  HeapWord** _survivor_chunk_array;
  size_t     _survivor_chunk_index;
  size_t     _survivor_chunk_capacity;
  size_t*    _cursor;
  ChunkArray* _survivor_plab_array;

  // Support for marking stack overflow handling
  bool take_from_overflow_list(size_t num, CMSMarkStack* to_stack);
  bool par_take_from_overflow_list(size_t num, OopTaskQueue* to_work_q);
  void push_on_overflow_list(oop p);
  void par_push_on_overflow_list(oop p);
  // the following is, obviously, not, in general, "MT-stable"
  bool overflow_list_is_empty() const;

  void preserve_mark_if_necessary(oop p);
  void par_preserve_mark_if_necessary(oop p);
  void preserve_mark_work(oop p, markOop m);
  void restore_preserved_marks_if_any();
  NOT_PRODUCT(bool no_preserved_marks() const;)
  // in support of testing overflow code
  NOT_PRODUCT(int _overflow_counter;)
  NOT_PRODUCT(bool simulate_overflow();)       // sequential
  NOT_PRODUCT(bool par_simulate_overflow();)   // MT version

  // CMS work methods
  void checkpointRootsInitialWork(bool asynch); // initial checkpoint work

  // a return value of false indicates failure due to stack overflow
  bool markFromRootsWork(bool asynch);  // concurrent marking work

 public:   // FIX ME!!! only for testing
  bool do_marking_st(bool asynch);      // single-threaded marking
  bool do_marking_mt(bool asynch);      // multi-threaded  marking

 private:

  // concurrent precleaning work
  size_t preclean_mod_union_table(ConcurrentMarkSweepGeneration* gen,
                                  ScanMarkedObjectsAgainCarefullyClosure* cl);
  size_t preclean_card_table(ConcurrentMarkSweepGeneration* gen,
                             ScanMarkedObjectsAgainCarefullyClosure* cl);
  // Does precleaning work, returning a quantity indicative of
  // the amount of "useful work" done.
  size_t preclean_work(bool clean_refs, bool clean_survivors);
  void abortable_preclean(); // Preclean while looking for possible abort
  void initialize_sequential_subtasks_for_young_gen_rescan(int i);
  // Helper function for above; merge-sorts the per-thread plab samples
  void merge_survivor_plab_arrays(ContiguousSpace* surv);
  // Resets (i.e. clears) the per-thread plab sample vectors
  void reset_survivor_plab_arrays();

  // final (second) checkpoint work
  void checkpointRootsFinalWork(bool asynch, bool clear_all_soft_refs,
                                bool init_mark_was_synchronous);
  // work routine for parallel version of remark
  void do_remark_parallel();
  // work routine for non-parallel version of remark
  void do_remark_non_parallel();
  // reference processing work routine (during second checkpoint)
  void refProcessingWork(bool asynch, bool clear_all_soft_refs);

  // concurrent sweeping work
  void sweepWork(ConcurrentMarkSweepGeneration* gen, bool asynch);

  // (concurrent) resetting of support data structures
  void reset(bool asynch);

  // Clear _expansion_cause fields of constituent generations
  void clear_expansion_cause();

  // An auxiliary method used to record the ends of
  // used regions of each generation to limit the extent of sweep
  void save_sweep_limits();

  // Resize the generations included in the collector.
  void compute_new_size();

  // A work method used by foreground collection to determine
  // what type of collection (compacting or not, continuing or fresh)
  // it should do.
  void decide_foreground_collection_type(bool clear_all_soft_refs,
    bool* should_compact, bool* should_start_over);

  // A work method used by the foreground collector to do
  // a mark-sweep-compact.
  void do_compaction_work(bool clear_all_soft_refs);

  // A work method used by the foreground collector to do
  // a mark-sweep, after taking over from a possibly on-going
  // concurrent mark-sweep collection.
  void do_mark_sweep_work(bool clear_all_soft_refs,
    CollectorState first_state, bool should_start_over);

  // If the background GC is active, acquire control from the background
  // GC and do the collection.
  void acquire_control_and_collect(bool   full, bool clear_all_soft_refs);

  // For synchronizing passing of control from background to foreground
  // GC.  waitForForegroundGC() is called by the background
  // collector.  If it had to wait for a foreground collection,
  // it returns true and the background collection should assume
  // that the collection was finished by the foreground
  // collector.
  bool waitForForegroundGC();

  // Incremental mode triggering:  recompute the icms duty cycle and set the
  // allocation limits in the young gen.
  void icms_update_allocation_limits();

  size_t block_size_using_printezis_bits(HeapWord* addr) const;
  size_t block_size_if_printezis_bits(HeapWord* addr) const;
  HeapWord* next_card_start_after_block(HeapWord* addr) const;

  void setup_cms_unloading_and_verification_state();
 public:
  CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
               ConcurrentMarkSweepGeneration* permGen,
               CardTableRS*                   ct,
               ConcurrentMarkSweepPolicy*     cp);
  ConcurrentMarkSweepThread* cmsThread() { return _cmsThread; }

  ReferenceProcessor* ref_processor() { return _ref_processor; }
  void ref_processor_init();

  Mutex* bitMapLock()        const { return _markBitMap.lock(); }
  static CollectorState abstract_state() { return _collectorState; }

  bool should_abort_preclean() const; // Whether preclean should be aborted.
  size_t get_eden_used() const;
  size_t get_eden_capacity() const;

  ConcurrentMarkSweepGeneration* cmsGen() { return _cmsGen; }

  // locking checks
  NOT_PRODUCT(static bool have_cms_token();)

  // XXXPERM bool should_collect(bool full, size_t size, bool tlab);
  bool shouldConcurrentCollect();

  void collect(bool   full,
               bool   clear_all_soft_refs,
               size_t size,
               bool   tlab);
  void collect_in_background(bool clear_all_soft_refs);
  void collect_in_foreground(bool clear_all_soft_refs);

  // In support of ExplicitGCInvokesConcurrent
  static void request_full_gc(unsigned int full_gc_count);
  // Should we unload classes in a particular concurrent cycle?
  bool should_unload_classes() const {
    return _should_unload_classes;
  }
  bool update_should_unload_classes();

  void direct_allocated(HeapWord* start, size_t size);

  // Object is dead if not marked and current phase is sweeping.
  bool is_dead_obj(oop obj) const;

  // After a promotion (of "start"), do any necessary marking.
  // If "par", then it's being done by a parallel GC thread.
  // The last two args indicate if we need precise marking
  // and if so the size of the object so it can be dirtied
  // in its entirety.
  void promoted(bool par, HeapWord* start,
                bool is_obj_array, size_t obj_size);

  HeapWord* allocation_limit_reached(Space* space, HeapWord* top,
                                     size_t word_size);

  void getFreelistLocks() const;
  void releaseFreelistLocks() const;
  bool haveFreelistLocks() const;

  // GC prologue and epilogue
  void gc_prologue(bool full);
  void gc_epilogue(bool full);

  jlong time_of_last_gc(jlong now) {
    if (_collectorState <= Idling) {
      // gc not in progress
      return _time_of_last_gc;
    } else {
      // collection in progress
      return now;
    }
  }

  // Support for parallel remark of survivor space
  void* get_data_recorder(int thr_num);

  CMSBitMap* markBitMap()  { return &_markBitMap; }
  void directAllocated(HeapWord* start, size_t size);

  // main CMS steps and related support
  void checkpointRootsInitial(bool asynch);
  bool markFromRoots(bool asynch);  // a return value of false indicates failure
                                    // due to stack overflow
  void preclean();
  void checkpointRootsFinal(bool asynch, bool clear_all_soft_refs,
                            bool init_mark_was_synchronous);
  void sweep(bool asynch);
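
  // Illustrative ordering of the steps above over one background cycle
  // (a sketch only; collect_in_background() is the authoritative driver,
  // and concurrent mode failure can cut the sequence short):
  //   checkpointRootsInitial(true);     // STW initial mark
  //   markFromRoots(true);              // concurrent marking
  //   preclean();                       // concurrent (abortable) preclean
  //   checkpointRootsFinal(true, ...);  // STW final remark
  //   sweep(true);                      // concurrent sweep
  //   reset(true);                      // concurrent reset, back to Idling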

  // Check that the currently executing thread is the expected
  // one (foreground collector or background collector).
  static void check_correct_thread_executing() PRODUCT_RETURN;
  // XXXPERM void print_statistics()           PRODUCT_RETURN;

  bool is_cms_reachable(HeapWord* addr);

  // Performance Counter Support
  CollectorCounters* counters()    { return _gc_counters; }

  // timer stuff
  void    startTimer() { assert(!_timer.is_active(), "Error"); _timer.start();   }
  void    stopTimer()  { assert( _timer.is_active(), "Error"); _timer.stop();    }
  void    resetTimer() { assert(!_timer.is_active(), "Error"); _timer.reset();   }
  double  timerValue() { assert(!_timer.is_active(), "Error"); return _timer.seconds(); }

  int  yields()          { return _numYields; }
  void resetYields()     { _numYields = 0;    }
  void incrementYields() { _numYields++;      }
  void resetNumDirtyCards()               { _numDirtyCards = 0; }
  void incrementNumDirtyCards(size_t num) { _numDirtyCards += num; }
  size_t  numDirtyCards()                 { return _numDirtyCards; }

  static bool foregroundGCShouldWait() { return _foregroundGCShouldWait; }
  static void set_foregroundGCShouldWait(bool v) { _foregroundGCShouldWait = v; }
  static bool foregroundGCIsActive() { return _foregroundGCIsActive; }
  static void set_foregroundGCIsActive(bool v) { _foregroundGCIsActive = v; }
  size_t sweep_count() const             { return _sweep_count; }
  void   increment_sweep_count()         { _sweep_count++; }

  // Timers/stats for gc scheduling and incremental mode pacing.
  CMSStats& stats() { return _stats; }

  // Convenience methods that check whether CMSIncrementalMode is enabled and
  // forward to the corresponding methods in ConcurrentMarkSweepThread.
  static void start_icms();
  static void stop_icms();    // Called at the end of the cms cycle.
  static void disable_icms(); // Called before a foreground collection.
  static void enable_icms();  // Called after a foreground collection.
  void icms_wait();           // Called at yield points.

  // Adaptive size policy
  CMSAdaptiveSizePolicy* size_policy();
  CMSGCAdaptivePolicyCounters* gc_adaptive_policy_counters();

  // debugging
  void verify(bool);
  bool verify_after_remark();
  void verify_ok_to_terminate() const PRODUCT_RETURN;
  void verify_work_stacks_empty() const PRODUCT_RETURN;
  void verify_overflow_empty() const PRODUCT_RETURN;

  // convenience methods in support of debugging
  static const size_t skip_header_HeapWords() PRODUCT_RETURN0;
  HeapWord* block_start(const void* p) const PRODUCT_RETURN0;

  // accessors
  CMSMarkStack* verification_mark_stack() { return &_markStack; }
  CMSBitMap*    verification_mark_bm()    { return &_verification_mark_bm; }

  // Get the bit map with the perm gen "deadness" information.
  CMSBitMap* perm_gen_verify_bit_map()       { return &_perm_gen_verify_bit_map; }

  // Initialization errors
  bool completed_initialization() { return _completed_initialization; }
};

class CMSExpansionCause : public AllStatic  {
 public:
  enum Cause {
    _no_expansion,
    _satisfy_free_ratio,
    _satisfy_promotion,
    _satisfy_allocation,
    _allocate_par_lab,
    _allocate_par_spooling_space,
    _adaptive_size_policy
  };
  // Return a string describing the cause of the expansion.
  static const char* to_string(CMSExpansionCause::Cause cause);
};

class ConcurrentMarkSweepGeneration: public CardGeneration {
  friend class VMStructs;
  friend class ConcurrentMarkSweepThread;
  friend class ConcurrentMarkSweep;
  friend class CMSCollector;
 protected:
  static CMSCollector*       _collector; // the collector that collects us
  CompactibleFreeListSpace*  _cmsSpace;  // underlying space (only one for now)

  // Performance Counters
  GenerationCounters*      _gen_counters;
  GSpaceCounters*          _space_counters;

  // Words directly allocated, used by CMSStats.
  size_t _direct_allocated_words;

  // Non-product stat counters
  NOT_PRODUCT(
    int _numObjectsPromoted;
    int _numWordsPromoted;
    int _numObjectsAllocated;
    int _numWordsAllocated;
  )

  // Used for sizing decisions
  bool _incremental_collection_failed;
  bool incremental_collection_failed() {
    return _incremental_collection_failed;
  }
  void set_incremental_collection_failed() {
    _incremental_collection_failed = true;
  }
  void clear_incremental_collection_failed() {
    _incremental_collection_failed = false;
  }

  // accessors
  void set_expansion_cause(CMSExpansionCause::Cause v) { _expansion_cause = v;}
  CMSExpansionCause::Cause expansion_cause() const { return _expansion_cause; }

 private:
  // For parallel young-gen GC support.
  CMSParGCThreadState** _par_gc_thread_states;

  // Reason generation was expanded
  CMSExpansionCause::Cause _expansion_cause;

  // In support of MinChunkSize being larger than min object size
  const double _dilatation_factor;

  enum CollectionTypes {
    Concurrent_collection_type          = 0,
    MS_foreground_collection_type       = 1,
    MSC_foreground_collection_type      = 2,
    Unknown_collection_type             = 3
  };

  CollectionTypes _debug_collection_type;

  // Fraction of current occupancy at which to start a CMS collection which
  // will collect this generation (at least).
  double _initiating_occupancy;

 protected:
  // Shrink generation by specified size
  virtual void shrink_by(size_t bytes);

  // Update statistics for GC
  virtual void update_gc_stats(int level, bool full);

  // Maximum available space in the generation (including uncommitted space).
  size_t max_available() const;

  // getter and initializer for _initiating_occupancy field.
  double initiating_occupancy() const { return _initiating_occupancy; }
  void   init_initiating_occupancy(intx io, intx tr);

 public:
  ConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size,
                                int level, CardTableRS* ct,
                                bool use_adaptive_freelists,
                                FreeBlockDictionary::DictionaryChoice);

  // Accessors
  CMSCollector* collector() const { return _collector; }
  static void set_collector(CMSCollector* collector) {
    assert(_collector == NULL, "already set");
    _collector = collector;
  }
  CompactibleFreeListSpace*  cmsSpace() const { return _cmsSpace;  }

  Mutex* freelistLock() const;

  virtual Generation::Name kind() { return Generation::ConcurrentMarkSweep; }

  // Adaptive size policy
  CMSAdaptiveSizePolicy* size_policy();

  bool refs_discovery_is_atomic() const { return false; }
  bool refs_discovery_is_mt()     const {
    // Note: CMS does MT-discovery during the parallel-remark
    // phases. Use ReferenceProcessorMTMutator to make refs
    // discovery MT-safe during such phases or other parallel
    // discovery phases in the future. This may all go away
    // if/when we decide that refs discovery is sufficiently
    // rare that the cost of the CAS's involved is in the
    // noise. That's a measurement that should be done, and
    // the code simplified if that turns out to be the case.
    return false;
  }

  // Override
  virtual void ref_processor_init();

  // Grow generation by specified size (returns false if unable to grow)
  bool grow_by(size_t bytes);
  // Grow generation to reserved size.
  bool grow_to_reserved();

  void clear_expansion_cause() { _expansion_cause = CMSExpansionCause::_no_expansion; }

  // Space enquiries
  size_t capacity() const;
  size_t used() const;
  size_t free() const;
  double occupancy() const { return ((double)used())/((double)capacity()); }
  size_t contiguous_available() const;
  size_t unsafe_max_alloc_nogc() const;

  // over-rides
  MemRegion used_region() const;
  MemRegion used_region_at_save_marks() const;

  // Does a "full" (forced) collection invoked on this generation collect
  // all younger generations as well? Note that the second conjunct is a
  // hack to allow the collection of the younger gen first if the flag is
  // set. This is better than using the policy's should_collect_gen0_first()
  // since that causes us to do an extra unnecessary pair of restart-&-stop-world.
  virtual bool full_collects_younger_generations() const {
    return UseCMSCompactAtFullCollection && !CollectGen0First;
  }

  void space_iterate(SpaceClosure* blk, bool usedOnly = false);

  // Support for compaction
  CompactibleSpace* first_compaction_space() const;
  // Adjust quantities in the generation affected by
  // the compaction.
  void reset_after_compaction();

  // Allocation support
  HeapWord* allocate(size_t size, bool tlab);
  HeapWord* have_lock_and_allocate(size_t size, bool tlab);
  oop       promote(oop obj, size_t obj_size);
  HeapWord* par_allocate(size_t size, bool tlab) {
    return allocate(size, tlab);
  }

  // Incremental mode triggering.
  HeapWord* allocation_limit_reached(Space* space, HeapWord* top,
                                     size_t word_size);

  // Used by CMSStats to track direct allocation.  The value is sampled and
  // reset after each young gen collection.
  size_t direct_allocated_words() const { return _direct_allocated_words; }
  void reset_direct_allocated_words()   { _direct_allocated_words = 0; }

  // Overrides for parallel promotion.
  virtual oop par_promote(int thread_num,
                          oop obj, markOop m, size_t word_sz);
  // This one should not be called for CMS.
  virtual void par_promote_alloc_undo(int thread_num,
                                      HeapWord* obj, size_t word_sz);
  virtual void par_promote_alloc_done(int thread_num);
  virtual void par_oop_since_save_marks_iterate_done(int thread_num);

  virtual bool promotion_attempt_is_safe(size_t promotion_in_bytes,
    bool younger_handles_promotion_failure) const;

  // Inform this (non-young) generation that a promotion failure was
  // encountered during a collection of a younger generation that
  // promotes into this generation.
  virtual void promotion_failure_occurred();

  bool should_collect(bool full, size_t size, bool tlab);
  virtual bool should_concurrent_collect() const;
  virtual bool is_too_full() const;
  void collect(bool   full,
               bool   clear_all_soft_refs,
               size_t size,
               bool   tlab);

  HeapWord* expand_and_allocate(size_t word_size,
                                bool tlab,
                                bool parallel = false);

  // GC prologue and epilogue
  void gc_prologue(bool full);
  void gc_prologue_work(bool full, bool registerClosure,
                        ModUnionClosure* modUnionClosure);
  void gc_epilogue(bool full);
  void gc_epilogue_work(bool full);

  // Time since last GC of this generation
  jlong time_of_last_gc(jlong now) {
    return collector()->time_of_last_gc(now);
  }
  void update_time_of_last_gc(jlong now) {
    collector()->update_time_of_last_gc(now);
  }

  // Allocation failure
  void expand(size_t bytes, size_t expand_bytes,
    CMSExpansionCause::Cause cause);
  virtual bool expand(size_t bytes, size_t expand_bytes);
  void shrink(size_t bytes);
  HeapWord* expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz);
  bool expand_and_ensure_spooling_space(PromotionInfo* promo);

  // Iteration support and related enquiries
  void save_marks();
  bool no_allocs_since_save_marks();
  void object_iterate_since_last_GC(ObjectClosure* cl);
  void younger_refs_iterate(OopsInGenClosure* cl);

  // Iteration support specific to CMS generations
  void save_sweep_limit();

  // More iteration support
  virtual void oop_iterate(MemRegion mr, OopClosure* cl);
  virtual void oop_iterate(OopClosure* cl);
  virtual void safe_object_iterate(ObjectClosure* cl);
  virtual void object_iterate(ObjectClosure* cl);

  // Need to declare the full complement of closures, whether we'll
  // override them or not, or get a message from the compiler:
  //   oop_since_save_marks_iterate_nv hides virtual function...
  #define CMS_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix) \
    void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl);
  ALL_SINCE_SAVE_MARKS_CLOSURES(CMS_SINCE_SAVE_MARKS_DECL)

  // Smart allocation  XXX -- move to CFLSpace?
  void setNearLargestChunk();
  bool isNearLargestChunk(HeapWord* addr);

  // Get the chunk at the end of the space.  Delegates to
  // the space.
  FreeChunk* find_chunk_at_end();

  // Overriding of unused functionality (sharing not yet supported with CMS)
  void pre_adjust_pointers();
  void post_compact();

  // Debugging
  void prepare_for_verify();
  void verify(bool allow_dirty);
  void print_statistics()               PRODUCT_RETURN;

  // Performance Counters support
  virtual void update_counters();
  virtual void update_counters(size_t used);
  void initialize_performance_counters();
  CollectorCounters* counters()  { return collector()->counters(); }

  // Support for parallel remark of survivor space
  void* get_data_recorder(int thr_num) {
    // Delegate to collector
    return collector()->get_data_recorder(thr_num);
  }

  // Printing
  const char* name() const;
  virtual const char* short_name() const { return "CMS"; }
  void        print() const;
  void printOccupancy(const char* s);
  bool must_be_youngest() const { return false; }
  bool must_be_oldest()   const { return true; }

  void compute_new_size();

  CollectionTypes debug_collection_type() { return _debug_collection_type; }
  void rotate_debug_collection_type();
};

class ASConcurrentMarkSweepGeneration : public ConcurrentMarkSweepGeneration {

  // Return the size policy from the heap's collector
  // policy casted to CMSAdaptiveSizePolicy*.
  CMSAdaptiveSizePolicy* cms_size_policy() const;

  // Resize the generation based on the adaptive size
  // policy.
  void resize(size_t cur_promo, size_t desired_promo);

  // Return the GC counters from the collector policy
  CMSGCAdaptivePolicyCounters* gc_adaptive_policy_counters();

  virtual void shrink_by(size_t bytes);

 public:
  virtual void compute_new_size();
  ASConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size,
                                  int level, CardTableRS* ct,
                                  bool use_adaptive_freelists,
                                  FreeBlockDictionary::DictionaryChoice
                                    dictionaryChoice) :
    ConcurrentMarkSweepGeneration(rs, initial_byte_size, level, ct,
      use_adaptive_freelists, dictionaryChoice) {}

  virtual const char* short_name() const { return "ASCMS"; }
  virtual Generation::Name kind() { return Generation::ASConcurrentMarkSweep; }

  virtual void update_counters();
  virtual void update_counters(size_t used);
};

//
// Closures of various sorts used by CMS to accomplish its work
//

// This closure is used to check that a certain set of oops is empty.
class FalseClosure: public OopClosure {
 public:
  void do_oop(oop* p)       { guarantee(false, "Should be an empty set"); }
  void do_oop(narrowOop* p) { guarantee(false, "Should be an empty set"); }
};

// This closure is used to do concurrent marking from the roots
// following the first checkpoint.
class MarkFromRootsClosure: public BitMapClosure {
  CMSCollector*  _collector;
  MemRegion      _span;
  CMSBitMap*     _bitMap;
  CMSBitMap*     _mut;
  CMSMarkStack*  _markStack;
  CMSMarkStack*  _revisitStack;
  bool           _yield;
  int            _skipBits;
  HeapWord*      _finger;
  HeapWord*      _threshold;
  DEBUG_ONLY(bool _verifying;)

 public:
  MarkFromRootsClosure(CMSCollector* collector, MemRegion span,
                       CMSBitMap* bitMap,
                       CMSMarkStack*  markStack,
                       CMSMarkStack*  revisitStack,
                       bool should_yield, bool verifying = false);
  bool do_bit(size_t offset);
  void reset(HeapWord* addr);
  inline void do_yield_check();

 private:
  void scanOopsInOop(HeapWord* ptr);
  void do_yield_work();
};

// This closure is used to do concurrent multi-threaded
// marking from the roots following the first checkpoint.
// XXX This should really be a subclass of the serial version
// above, but I have not had the time to refactor things cleanly.
// That will be done for Dolphin.
1357 class Par_MarkFromRootsClosure: public BitMapClosure {
1358   CMSCollector*  _collector;
1359   MemRegion      _whole_span;
1360   MemRegion      _span;
1361   CMSBitMap*     _bit_map;
1362   CMSBitMap*     _mut;
1363   OopTaskQueue*  _work_queue;
1364   CMSMarkStack*  _overflow_stack;
1365   CMSMarkStack*  _revisit_stack;
1366   bool           _yield;
1367   int            _skip_bits;
1368   HeapWord*      _finger;
1369   HeapWord*      _threshold;
1370   CMSConcMarkingTask* _task;
1371  public:
1372   Par_MarkFromRootsClosure(CMSConcMarkingTask* task, CMSCollector* collector,
1373                        MemRegion span,
1374                        CMSBitMap* bit_map,
1375                        OopTaskQueue* work_queue,
1376                        CMSMarkStack*  overflow_stack,
1377                        CMSMarkStack*  revisit_stack,
1378                        bool should_yield);
1379   bool do_bit(size_t offset);
1380   inline void do_yield_check();
1381 
1382  private:
1383   void scan_oops_in_oop(HeapWord* ptr);
1384   void do_yield_work();
1385   bool get_work_from_overflow_stack();
1386 };
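
     // A hedged sketch of the work-distribution idea behind the parallel
     // marker: each worker drains its own task queue and, when it runs dry,
     // tries to refill from a shared overflow stack before giving up. The
     // SharedOverflowStack template below is a hypothetical stand-in for
     // the CMSMarkStack/OopTaskQueue pair, using C++11 primitives.
     //
     //   #include <deque>
     //   #include <mutex>
     //
     //   template <typename T>
     //   struct SharedOverflowStack {
     //     std::mutex lock;
     //     std::deque<T> items;
     //     // Move up to max items into the caller's local queue; returns
     //     // the number actually transferred (0 means "no work left").
     //     size_t refill(std::deque<T>& local, size_t max) {
     //       std::lock_guard<std::mutex> g(lock);
     //       size_t n = 0;
     //       while (n < max && !items.empty()) {
     //         local.push_back(items.back());
     //         items.pop_back();
     //         ++n;
     //       }
     //       return n;
     //     }
     //   };
     //
     // get_work_from_overflow_stack() above plays the refill role: a worker
     // whose local queue is empty picks up marking work that other workers
     // spilled when their own queues overflowed.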
1387 
1388 // The following closures are used to do certain kinds of verification of
1389 // CMS marking.
1390 class PushAndMarkVerifyClosure: public OopClosure {
1391   CMSCollector*    _collector;
1392   MemRegion        _span;
1393   CMSBitMap*       _verification_bm;
1394   CMSBitMap*       _cms_bm;
1395   CMSMarkStack*    _mark_stack;
1396  protected:
1397   void do_oop(oop p);
1398   template <class T> inline void do_oop_work(T *p) {
1399     oop obj = oopDesc::load_decode_heap_oop_not_null(p);
1400     do_oop(obj);
1401   }
1402  public:
1403   PushAndMarkVerifyClosure(CMSCollector* cms_collector,
1404                            MemRegion span,
1405                            CMSBitMap* verification_bm,
1406                            CMSBitMap* cms_bm,
1407                            CMSMarkStack*  mark_stack);
1408   void do_oop(oop* p);
1409   void do_oop(narrowOop* p);
1410   // Deal with a stack overflow condition
1411   void handle_stack_overflow(HeapWord* lost);
1412 };
1413 
1414 class MarkFromRootsVerifyClosure: public BitMapClosure {
1415   CMSCollector*  _collector;
1416   MemRegion      _span;
1417   CMSBitMap*     _verification_bm;
1418   CMSBitMap*     _cms_bm;
1419   CMSMarkStack*  _mark_stack;
1420   HeapWord*      _finger;
1421   PushAndMarkVerifyClosure _pam_verify_closure;
1422  public:
1423   MarkFromRootsVerifyClosure(CMSCollector* collector, MemRegion span,
1424                              CMSBitMap* verification_bm,
1425                              CMSBitMap* cms_bm,
1426                              CMSMarkStack*  mark_stack);
1427   bool do_bit(size_t offset);
1428   void reset(HeapWord* addr);
1429 };
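
     // A minimal sketch (illustrative only) of the invariant these verify
     // closures establish: marking is redone into a fresh verification bit
     // map, and every bit set there must also be set in the bit map the
     // concurrent collector produced. Plain vectors stand in for the two
     // CMSBitMaps.
     //
     //   #include <cassert>
     //   #include <cstddef>
     //   #include <vector>
     //
     //   void verify_marks(const std::vector<bool>& verification_bm,
     //                     const std::vector<bool>& cms_bm) {
     //     assert(verification_bm.size() == cms_bm.size());
     //     for (size_t i = 0; i < verification_bm.size(); ++i) {
     //       // Everything the verifier reached must be marked by CMS; the
     //       // converse need not hold, since CMS may conservatively keep
     //       // floating garbage marked.
     //       assert(!verification_bm[i] || cms_bm[i]);
     //     }
     //   }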
1430 
1431 
1432 // This closure is used to check that a certain set of bits is
1433 // "empty" (i.e. the bit vector doesn't have any 1-bits).
1434 class FalseBitMapClosure: public BitMapClosure {
1435  public:
1436   bool do_bit(size_t offset) {
1437     guarantee(false, "Should not have a 1 bit");
1438     return true;
1439   }
1440 };
1441 
1442 // This closure is used during the second checkpointing phase
1443 // to rescan the marked objects on the dirty cards in the mod
1444 // union table and the card table proper. It's invoked via
1445 // MarkFromDirtyCardsClosure below. It uses
1446 // MarkRefsIntoAndScanClosure (Par_MarkRefsIntoAndScanClosure in
1447 // the parallel case), declared in genOopClosures.hpp, to
1448 // accomplish some of its work. In the parallel case the bit map
1449 // is shared, so access to it needs to be suitably synchronized
1450 // for the embedded closures that update it; however, this
1451 // closure itself only reads the bit map and, being idempotent,
1452 // is immune to reading stale values.
1453 class ScanMarkedObjectsAgainClosure: public UpwardsObjectClosure {
1454   #ifdef ASSERT
1455     CMSCollector*          _collector;
1456     MemRegion              _span;
1457     union {
1458       CMSMarkStack*        _mark_stack;
1459       OopTaskQueue*        _work_queue;
1460     };
1461   #endif // ASSERT
1462   bool                       _parallel;
1463   CMSBitMap*                 _bit_map;
1464   union {
1465     MarkRefsIntoAndScanClosure*     _scan_closure;
1466     Par_MarkRefsIntoAndScanClosure* _par_scan_closure;
1467   };
1468 
1469  public:
1470   ScanMarkedObjectsAgainClosure(CMSCollector* collector,
1471                                 MemRegion span,
1472                                 ReferenceProcessor* rp,
1473                                 CMSBitMap* bit_map,
1474                                 CMSMarkStack*  mark_stack,
1475                                 CMSMarkStack*  revisit_stack,
1476                                 MarkRefsIntoAndScanClosure* cl):
1477     #ifdef ASSERT
1478       _collector(collector),
1479       _span(span),
1480       _mark_stack(mark_stack),
1481     #endif // ASSERT
1482     _parallel(false),
1483     _bit_map(bit_map),
1484     _scan_closure(cl) { }
1485 
1486   ScanMarkedObjectsAgainClosure(CMSCollector* collector,
1487                                 MemRegion span,
1488                                 ReferenceProcessor* rp,
1489                                 CMSBitMap* bit_map,
1490                                 OopTaskQueue* work_queue,
1491                                 CMSMarkStack* revisit_stack,
1492                                 Par_MarkRefsIntoAndScanClosure* cl):
1493     #ifdef ASSERT
1494       _collector(collector),
1495       _span(span),
1496       _work_queue(work_queue),
1497     #endif // ASSERT
1498     _parallel(true),
1499     _bit_map(bit_map),
1500     _par_scan_closure(cl) { }
1501 
1502   void do_object(oop obj) {
1503     guarantee(false, "Call do_object_bm(oop, MemRegion) instead");
1504   }
1505   bool do_object_b(oop obj) {
1506     guarantee(false, "Call do_object_bm(oop, MemRegion) instead");
1507     return false;
1508   }
1509   bool do_object_bm(oop p, MemRegion mr);
1510 };
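
     // The class above stores either the serial or the parallel scanning
     // closure in an anonymous union, discriminated by _parallel. A
     // self-contained sketch of that tagged-union pattern (SerialScan,
     // ParallelScan and Dispatcher are hypothetical):
     //
     //   struct SerialScan   { void scan(int x) { /* serial work on x */ } };
     //   struct ParallelScan { void scan(int x) { /* parallel work on x */ } };
     //
     //   class Dispatcher {
     //     bool _parallel;            // discriminant for the union below
     //     union {
     //       SerialScan*   _serial;
     //       ParallelScan* _par;
     //     };
     //    public:
     //     explicit Dispatcher(SerialScan* s)
     //       : _parallel(false), _serial(s) {}
     //     explicit Dispatcher(ParallelScan* p)
     //       : _parallel(true), _par(p) {}
     //     void scan(int x) {
     //       if (_parallel) _par->scan(x); else _serial->scan(x);
     //     }
     //   };
     //
     // The union keeps the closure's footprint small while letting a single
     // class body serve both the serial and the parallel rescan paths.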
1511 
1512 // This closure is used during the second checkpointing phase
1513 // to rescan the marked objects on the dirty cards in the mod
1514 // union table and the card table proper. It invokes
1515 // ScanMarkedObjectsAgainClosure above to accomplish much of its work.
1516 // In the parallel case, the bit map is shared and requires
1517 // synchronized access.
1518 class MarkFromDirtyCardsClosure: public MemRegionClosure {
1519   CompactibleFreeListSpace*      _space;
1520   ScanMarkedObjectsAgainClosure  _scan_cl;
1521   size_t                         _num_dirty_cards;
1522 
1523  public:
1524   MarkFromDirtyCardsClosure(CMSCollector* collector,
1525                             MemRegion span,
1526                             CompactibleFreeListSpace* space,
1527                             CMSBitMap* bit_map,
1528                             CMSMarkStack* mark_stack,
1529                             CMSMarkStack* revisit_stack,
1530                             MarkRefsIntoAndScanClosure* cl):
1531     _space(space),
1532     _scan_cl(collector, span, collector->ref_processor(), bit_map,
1533              mark_stack, revisit_stack, cl),
1534     _num_dirty_cards(0) { }
1535 
1536   MarkFromDirtyCardsClosure(CMSCollector* collector,
1537                             MemRegion span,
1538                             CompactibleFreeListSpace* space,
1539                             CMSBitMap* bit_map,
1540                             OopTaskQueue* work_queue,
1541                             CMSMarkStack* revisit_stack,
1542                             Par_MarkRefsIntoAndScanClosure* cl):
1543     _space(space),
1544     _scan_cl(collector, span, collector->ref_processor(), bit_map,
1545              work_queue, revisit_stack, cl),
1546     _num_dirty_cards(0) { }
1547 
1548   void do_MemRegion(MemRegion mr);
1549   void set_space(CompactibleFreeListSpace* space) { _space = space; }
1550   size_t num_dirty_cards() { return _num_dirty_cards; }
1551 };
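
     // A sketch of the card-to-region step that feeds do_MemRegion() above:
     // contiguous runs of dirty cards are collapsed into address ranges
     // which the scanning closure then rescans. The 512-byte card size is
     // typical but assumed here; dirty_ranges() is a hypothetical helper.
     //
     //   #include <cstddef>
     //   #include <utility>
     //   #include <vector>
     //
     //   const size_t kCardSize = 512;  // bytes covered by one card entry
     //
     //   // Collapse consecutive dirty cards into [start, end) byte ranges.
     //   std::vector<std::pair<size_t, size_t> >
     //   dirty_ranges(const std::vector<bool>& card_is_dirty) {
     //     std::vector<std::pair<size_t, size_t> > out;
     //     size_t i = 0, n = card_is_dirty.size();
     //     while (i < n) {
     //       if (!card_is_dirty[i]) { ++i; continue; }
     //       size_t j = i;
     //       while (j < n && card_is_dirty[j]) ++j;
     //       out.push_back(std::make_pair(i * kCardSize, j * kCardSize));
     //       i = j;
     //     }
     //     return out;
     //   }
     //
     // Batching cards into maximal runs keeps the per-region overhead
     // (closure setup, synchronization) proportional to the number of
     // dirty runs rather than the number of dirty cards.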
1552 
1553 // This closure is used in the non-product build to check
1554 // that a given set of MemRegions is empty; it should never be invoked.
1555 class FalseMemRegionClosure: public MemRegionClosure {
1556   void do_MemRegion(MemRegion mr) {
1557     guarantee(!mr.is_empty(), "Shouldn't be empty");
1558     guarantee(false, "Should never be here");
1559   }
1560 };
1561 
1562 // This closure is used during the precleaning phase
1563 // to "carefully" rescan marked objects on dirty cards.
1564 // It uses MarkRefsIntoAndScanClosure declared in genOopClosures.hpp
1565 // to accomplish some of its work.
1566 class ScanMarkedObjectsAgainCarefullyClosure: public ObjectClosureCareful {
1567   CMSCollector*                  _collector;
1568   MemRegion                      _span;
1569   bool                           _yield;
1570   Mutex*                         _freelistLock;
1571   CMSBitMap*                     _bitMap;
1572   CMSMarkStack*                  _markStack;
1573   MarkRefsIntoAndScanClosure*    _scanningClosure;
1574 
1575  public:
1576   ScanMarkedObjectsAgainCarefullyClosure(CMSCollector* collector,
1577                                          MemRegion     span,
1578                                          CMSBitMap* bitMap,
1579                                          CMSMarkStack*  markStack,
1580                                          CMSMarkStack*  revisitStack,
1581                                          MarkRefsIntoAndScanClosure* cl,
1582                                          bool should_yield):
1583     _collector(collector),
1584     _span(span),
1585     _yield(should_yield),
1586     _bitMap(bitMap),
1587     _markStack(markStack),
1588     _scanningClosure(cl) {
1589   }
1590 
1591   void do_object(oop p) {
1592     guarantee(false, "call do_object_careful instead");
1593   }
1594 
1595   size_t      do_object_careful(oop p) {
1596     guarantee(false, "Unexpected caller");
1597     return 0;
1598   }
1599 
1600   size_t      do_object_careful_m(oop p, MemRegion mr);
1601 
1602   void setFreelistLock(Mutex* m) {
1603     _freelistLock = m;
1604     _scanningClosure->set_freelistLock(m);
1605   }
1606 
1607  private:
1608   inline bool do_yield_check();
1609 
1610   void do_yield_work();
1611 };
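
     // A sketch of the "careful" iteration protocol, with stand-in types:
     // the space walks blocks one at a time, and the closure returns the
     // size of the block it consumed, or 0 to abort the walk (for example
     // because a yield released the locks, so the space may have changed
     // underneath it). CarefulClosure and iterate_carefully() are
     // hypothetical.
     //
     //   #include <cstddef>
     //
     //   struct CarefulClosure {
     //     // Returns bytes consumed at addr, or 0 to abort the iteration.
     //     virtual size_t do_block_careful(char* addr) = 0;
     //     virtual ~CarefulClosure() {}
     //   };
     //
     //   // Walk [bottom, top) until done or the closure asks us to stop;
     //   // return the address where iteration stopped so it can resume.
     //   char* iterate_carefully(char* bottom, char* top,
     //                           CarefulClosure* cl) {
     //     char* cur = bottom;
     //     while (cur < top) {
     //       size_t consumed = cl->do_block_careful(cur);
     //       if (consumed == 0) break;  // closure could not proceed
     //       cur += consumed;
     //     }
     //     return cur;
     //   }
     //
     // Returning the stopping point lets precleaning pick up where it left
     // off after reacquiring the free list lock and the bit map lock.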
1612 
1613 class SurvivorSpacePrecleanClosure: public ObjectClosureCareful {
1614   CMSCollector*                  _collector;
1615   MemRegion                      _span;
1616   bool                           _yield;
1617   CMSBitMap*                     _bit_map;
1618   CMSMarkStack*                  _mark_stack;
1619   PushAndMarkClosure*            _scanning_closure;
1620   unsigned int                   _before_count;
1621 
1622  public:
1623   SurvivorSpacePrecleanClosure(CMSCollector* collector,
1624                                MemRegion     span,
1625                                CMSBitMap*    bit_map,
1626                                CMSMarkStack* mark_stack,
1627                                PushAndMarkClosure* cl,
1628                                unsigned int  before_count,
1629                                bool          should_yield):
1630     _collector(collector),
1631     _span(span),
1632     _yield(should_yield),
1633     _bit_map(bit_map),
1634     _mark_stack(mark_stack),
1635     _scanning_closure(cl),
1636     _before_count(before_count)
1637   { }
1638 
1639   void do_object(oop p) {
1640     guarantee(false, "call do_object_careful instead");
1641   }
1642 
1643   size_t      do_object_careful(oop p);
1644 
1645   size_t      do_object_careful_m(oop p, MemRegion mr) {
1646     guarantee(false, "Unexpected caller");
1647     return 0;
1648   }
1649 
1650  private:
1651   inline void do_yield_check();
1652   void do_yield_work();
1653 };
1654 
1655 // This closure is used to accomplish the sweeping work
1656 // after the second checkpoint but before the concurrent reset
1657 // phase.
1658 //
1659 // Terminology
1660 //   left hand chunk (LHC) - block of one or more chunks currently being
1661 //     coalesced.  The LHC is available for coalescing with a new chunk.
1662 //   right hand chunk (RHC) - block currently being swept which, if
1663 //     free or garbage, can be coalesced with the LHC.
1664 // _inFreeRange is true if there is currently a LHC
1665 // _lastFreeRangeCoalesced is true if the LHC consists of more than one chunk.
1666 // _freeRangeInFreeLists is true if the LHC is in the free lists.
1667 // _freeFinger is the address of the current LHC
1668 class SweepClosure: public BlkClosureCareful {
1669   CMSCollector*                  _collector;  // collector doing the work
1670   ConcurrentMarkSweepGeneration* _g;    // Generation being swept
1671   CompactibleFreeListSpace*      _sp;   // Space being swept
1672   HeapWord*                      _limit;
1673   Mutex*                         _freelistLock; // Free list lock (in space)
1674   CMSBitMap*                     _bitMap;       // Marking bit map (in
1675                                                 // generation)
1676   bool                           _inFreeRange;  // Indicates if we are in the
1677                                                 // midst of a free run
1678   bool                           _freeRangeInFreeLists;
1679                                         // Often, we have just found
1680                                         // a free chunk and started
1681                                         // a new free range; we do not
1682                                         // eagerly remove this chunk from
1683                                         // the free lists unless there is
1684                                         // a possibility of coalescing.
1685                                         // When true, this flag indicates
1686                                         // that the _freeFinger below
1687                                         // points to a potentially free chunk
1688                                         // that may still be in the free lists
1689   bool                           _lastFreeRangeCoalesced;
1690                                         // free range contains chunks
1691                                         // coalesced
1692   bool                           _yield;
1693                                         // Whether sweeping should be
1694                                         // done with yields. For instance
1695                                         // when done by the foreground
1696                                         // collector we shouldn't yield.
1697   HeapWord*                      _freeFinger;   // When _inFreeRange is set, the
1698                                                 // pointer to the "left hand
1699                                                 // chunk"
1700   size_t                         _freeRangeSize;
1701                                         // When _inFreeRange is set, this
1702                                         // indicates the accumulated size
1703                                         // of the "left hand chunk"
1704   NOT_PRODUCT(
1705     size_t                       _numObjectsFreed;
1706     size_t                       _numWordsFreed;
1707     size_t                       _numObjectsLive;
1708     size_t                       _numWordsLive;
1709     size_t                       _numObjectsAlreadyFree;
1710     size_t                       _numWordsAlreadyFree;
1711     FreeChunk*                   _last_fc;
1712   )
1713  private:
1714   // Code common to processing a free chunk or a garbage chunk
1715   // encountered during sweeping.
1716   void doPostIsFreeOrGarbageChunk(FreeChunk *fc,
1717                                   size_t chunkSize);
1718   // Process a free chunk during sweeping.
1719   void doAlreadyFreeChunk(FreeChunk *fc);
1720   // Process a garbage chunk during sweeping.
1721   size_t doGarbageChunk(FreeChunk *fc);
1722   // Process a live chunk during sweeping.
1723   size_t doLiveChunk(FreeChunk* fc);
1724 
1725   // Accessors.
1726   HeapWord* freeFinger() const          { return _freeFinger; }
1727   void set_freeFinger(HeapWord* v)      { _freeFinger = v; }
1728   size_t freeRangeSize() const          { return _freeRangeSize; }
1729   void set_freeRangeSize(size_t v)      { _freeRangeSize = v; }
1730   bool inFreeRange()    const           { return _inFreeRange; }
1731   void set_inFreeRange(bool v)          { _inFreeRange = v; }
1732   bool lastFreeRangeCoalesced() const    { return _lastFreeRangeCoalesced; }
1733   void set_lastFreeRangeCoalesced(bool v) { _lastFreeRangeCoalesced = v; }
1734   bool freeRangeInFreeLists() const     { return _freeRangeInFreeLists; }
1735   void set_freeRangeInFreeLists(bool v) { _freeRangeInFreeLists = v; }
1736 
1737   // Initialize a free range.
1738   void initialize_free_range(HeapWord* freeFinger, bool freeRangeInFreeLists);
1739   // Return this chunk to the free lists.
1740   void flushCurFreeChunk(HeapWord* chunk, size_t size);
1741 
1742   // Check if we should yield and do so when necessary.
1743   inline void do_yield_check(HeapWord* addr);
1744 
1745   // Yield
1746   void do_yield_work(HeapWord* addr);
1747 
1748   // Debugging/Printing
1749   void record_free_block_coalesced(FreeChunk* fc) const PRODUCT_RETURN;
1750 
1751  public:
1752   SweepClosure(CMSCollector* collector, ConcurrentMarkSweepGeneration* g,
1753                CMSBitMap* bitMap, bool should_yield);
1754   ~SweepClosure();
1755 
1756   size_t       do_blk_careful(HeapWord* addr);
1757 };
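
     // A hedged sketch of the coalescing state machine described in the
     // terminology block above. Chunks are simplified to (addr, size,
     // dead_or_free) triples; the real sweeper additionally consults the
     // marking bit map and the free lists. Chunk and sweep() are
     // hypothetical.
     //
     //   #include <cstddef>
     //   #include <vector>
     //
     //   struct Chunk { size_t addr; size_t size; bool dead_or_free; };
     //
     //   // Coalesce maximal runs of dead/free chunks (each run is a "left
     //   // hand chunk") and return the resulting free blocks.
     //   std::vector<Chunk> sweep(const std::vector<Chunk>& blocks) {
     //     std::vector<Chunk> free_blocks;
     //     bool   in_free_range = false; // is there currently a LHC?
     //     size_t free_finger   = 0;     // start address of the LHC
     //     size_t free_size     = 0;     // accumulated size of the LHC
     //     for (size_t i = 0; i < blocks.size(); ++i) {
     //       const Chunk& c = blocks[i]; // the "right hand chunk"
     //       if (c.dead_or_free) {
     //         if (!in_free_range) {     // start a new LHC here
     //           in_free_range = true;
     //           free_finger   = c.addr;
     //           free_size     = 0;
     //         }
     //         free_size += c.size;      // coalesce the RHC into the LHC
     //       } else if (in_free_range) { // a live chunk ends the range
     //         Chunk f = { free_finger, free_size, true };
     //         free_blocks.push_back(f);
     //         in_free_range = false;
     //       }
     //     }
     //     if (in_free_range) {
     //       Chunk f = { free_finger, free_size, true };
     //       free_blocks.push_back(f);
     //     }
     //     return free_blocks;
     //   }
     //
     // _inFreeRange, _freeFinger and _freeRangeSize play the roles of the
     // three locals in this sketch; flushCurFreeChunk() corresponds to
     // pushing a finished block onto free_blocks.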
1758 
1759 // Closures related to weak references processing
1760 
1761 // During CMS' weak reference processing, this is a
1762 // work-routine/closure used to complete transitive
1763 // marking of objects as live after a certain point
1764 // at which an initial set has been completely accumulated.
1765 // This closure is currently used both during the final
1766 // remark stop-world phase, as well as during the concurrent
1767 // precleaning of the discovered reference lists.
1768 class CMSDrainMarkingStackClosure: public VoidClosure {
1769   CMSCollector*        _collector;
1770   MemRegion            _span;
1771   CMSMarkStack*        _mark_stack;
1772   CMSBitMap*           _bit_map;
1773   CMSKeepAliveClosure* _keep_alive;
1774   bool                 _concurrent_precleaning;
1775  public:
1776   CMSDrainMarkingStackClosure(CMSCollector* collector, MemRegion span,
1777                       CMSBitMap* bit_map, CMSMarkStack* mark_stack,
1778                       CMSKeepAliveClosure* keep_alive,
1779                       bool cpc):
1780     _collector(collector),
1781     _span(span),
1782     _mark_stack(mark_stack),
1783     _bit_map(bit_map),
1784     _keep_alive(keep_alive),
1785     _concurrent_precleaning(cpc) {
1786     assert(_concurrent_precleaning == _keep_alive->concurrent_precleaning(),
1787            "Mismatch");
1788   }
1789 
1790   void do_void();
1791 };
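
     // The drain loop, sketched abstractly with hypothetical types: pop a
     // gray object, apply the keep-alive step to each of its fields (which
     // may push newly marked objects back on the stack), and repeat until
     // the stack is empty.
     //
     //   #include <cstddef>
     //   #include <vector>
     //
     //   struct Obj { std::vector<Obj*> fields; bool marked; };
     //
     //   void drain(std::vector<Obj*>& stack) {
     //     while (!stack.empty()) {
     //       Obj* o = stack.back();
     //       stack.pop_back();
     //       for (size_t i = 0; i < o->fields.size(); ++i) {
     //         Obj* f = o->fields[i];
     //         if (f != NULL && !f->marked) { // keep an unmarked ref alive
     //           f->marked = true;
     //           stack.push_back(f);          // gray: fields still unscanned
     //         }
     //       }
     //     }
     //   }
     //
     // Since each object is pushed at most once, the loop terminates with
     // marking transitively closed over everything reachable from the
     // initial stack contents.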
1792 
1793 // A parallel version of CMSDrainMarkingStackClosure above.
1794 class CMSParDrainMarkingStackClosure: public VoidClosure {
1795   CMSCollector*           _collector;
1796   MemRegion               _span;
1797   OopTaskQueue*           _work_queue;
1798   CMSBitMap*              _bit_map;
1799   CMSInnerParMarkAndPushClosure _mark_and_push;
1800 
1801  public:
1802   CMSParDrainMarkingStackClosure(CMSCollector* collector,
1803                                  MemRegion span, CMSBitMap* bit_map,
1804                                  CMSMarkStack* revisit_stack,
1805                                  OopTaskQueue* work_queue):
1806     _collector(collector),
1807     _span(span),
1808     _work_queue(work_queue),
1809     _bit_map(bit_map),
1810     _mark_and_push(collector, span, bit_map, revisit_stack, work_queue) { }
1811 
1812  public:
1813   void trim_queue(uint max);
1814   void do_void();
1815 };
1816 
1817 // Allow yielding or short-circuiting of reference list
1818 // precleaning work.
1819 class CMSPrecleanRefsYieldClosure: public YieldClosure {
1820   CMSCollector* _collector;
1821   void do_yield_work();
1822  public:
1823   CMSPrecleanRefsYieldClosure(CMSCollector* collector):
1824     _collector(collector) {}
1825   virtual bool should_return();
1826 };
1827 
1828 
1829 // Convenience class that locks the free list locks for a given CMS collector.
1830 class FreelistLocker: public StackObj {
1831  private:
1832   CMSCollector* _collector;
1833  public:
1834   FreelistLocker(CMSCollector* collector):
1835     _collector(collector) {
1836     _collector->getFreelistLocks();
1837   }
1838 
1839   ~FreelistLocker() {
1840     _collector->releaseFreelistLocks();
1841   }
1842 };
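
     // FreelistLocker is the standard RAII idiom: acquire in the
     // constructor, release in the destructor, so the locks are dropped on
     // every exit path from the scope, including early returns. A usage
     // sketch (do_work_requiring_freelist_locks is hypothetical):
     //
     //   void example(CMSCollector* collector) {
     //     FreelistLocker fll(collector); // takes all free list locks
     //     do_work_requiring_freelist_locks(collector);
     //   }                                // locks released here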
1843 
1844 // Mark all dead objects in a given space.
1845 class MarkDeadObjectsClosure: public BlkClosure {
1846   const CMSCollector*             _collector;
1847   const CompactibleFreeListSpace* _sp;
1848   CMSBitMap*                      _live_bit_map;
1849   CMSBitMap*                      _dead_bit_map;
1850 public:
1851   MarkDeadObjectsClosure(const CMSCollector* collector,
1852                          const CompactibleFreeListSpace* sp,
1853                          CMSBitMap *live_bit_map,
1854                          CMSBitMap *dead_bit_map) :
1855     _collector(collector),
1856     _sp(sp),
1857     _live_bit_map(live_bit_map),
1858     _dead_bit_map(dead_bit_map) {}
1859   size_t do_blk(HeapWord* addr);
1860 };
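
     // A minimal sketch of the live/dead relationship the closure above
     // maintains: an address range is recorded as dead exactly when it
     // holds allocated storage that the live bit map did not mark. Plain
     // vectors stand in for the CMSBitMap pair; compute_dead_bits() is
     // hypothetical.
     //
     //   #include <cstddef>
     //   #include <vector>
     //
     //   void compute_dead_bits(const std::vector<bool>& allocated,
     //                          const std::vector<bool>& live,
     //                          std::vector<bool>& dead) {
     //     dead.assign(allocated.size(), false);
     //     for (size_t i = 0; i < allocated.size(); ++i) {
     //       dead[i] = allocated[i] && !live[i]; // allocated but unmarked
     //     }
     //   }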