/*
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_HPP
#define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_HPP

#include "gc_implementation/shared/gcHeapSummary.hpp"
#include "gc_implementation/shared/gSpaceCounters.hpp"
#include "gc_implementation/shared/gcStats.hpp"
#include "gc_implementation/shared/gcWhen.hpp"
#include "gc_implementation/shared/generationCounters.hpp"
#include "memory/freeBlockDictionary.hpp"
#include "memory/generation.hpp"
#include "memory/iterator.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/virtualspace.hpp"
#include "services/memoryService.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/stack.inline.hpp"
#include "utilities/taskqueue.hpp"
#include "utilities/yieldingWorkgroup.hpp"

// ConcurrentMarkSweepGeneration is in support of a concurrent
// mark-sweep old generation in the Detlefs-Printezis--Boehm-Demers-Schenker
// style. We assume, for now, that this generation is always the
// seniormost generation and, for simplicity in the first implementation,
// that it is a single compactible space. Neither of these restrictions
// appears essential; both will be relaxed in the future when more time is
// available to implement the greater generality (and there's a need for it).
//
// Concurrent mode failures are currently handled by
// means of a sliding mark-compact.
class AdaptiveSizePolicy;
class CMSConcMarkingTask;
class CMSGCAdaptivePolicyCounters;
class CMSTracer;
class ConcurrentGCTimer;
class ConcurrentMarkSweepGeneration;
class ConcurrentMarkSweepPolicy;
class ConcurrentMarkSweepThread;
class CompactibleFreeListSpace;
class FreeChunk;
class PromotionInfo;
class ScanMarkedObjectsAgainCarefullyClosure;
class TenuredGeneration;
class SerialOldTracer;

// A generic CMS bit map. It is the basis for both the CMS marking bit map
// as well as for the mod union table (in each case only a subset of the
// methods are used). This is essentially a wrapper around the BitMap class,
// with one bit per (1 << _shifter) HeapWords (i.e. for the marking bit map
// we have _shifter == 0, and for the mod union table we have
// _shifter == CardTableModRefBS::card_shift - LogHeapWordSize).
// XXX 64-bit issues in BitMap?
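//
// Illustrative sketch of the address <-> bit mapping implied above; the
// expressions mirror heapWordToOffset()/offsetToHeapWord() declared below,
// but the code here is only a hedged example, not the implementation:
//
//   size_t    offset = (addr - _bmStartWord) >> _shifter;   // word -> bit
//   HeapWord* addr2  = _bmStartWord + (offset << _shifter); // bit -> word
//
// With _shifter == 0 each HeapWord gets its own bit; with the mod union
// table's shifter a single bit covers a whole card's worth of HeapWords.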
class CMSBitMap VALUE_OBJ_CLASS_SPEC {
  friend class VMStructs;

  HeapWord* _bmStartWord;   // base address of range covered by map
  size_t    _bmWordSize;    // map size (in #HeapWords covered)
  const int _shifter;       // shifts to convert HeapWord to bit position
  VirtualSpace _virtual_space; // underlying storage for the bit map
  BitMap    _bm;            // the bit map itself
 public:
  Mutex* const _lock;       // mutex protecting _bm

  // constructor
  CMSBitMap(int shifter, int mutex_rank, const char* mutex_name);

  // allocates the actual storage for the map
  bool allocate(MemRegion mr);
  // field getter
  Mutex* lock() const { return _lock; }
  // locking verifier convenience function
  void assert_locked() const PRODUCT_RETURN;

  // inquiries
  HeapWord* startWord()   const { return _bmStartWord; }
  size_t    sizeInWords() const { return _bmWordSize;  }
  size_t    sizeInBits()  const { return _bm.size();   }
  // the following is one past the last word in space
  HeapWord* endWord()     const { return _bmStartWord + _bmWordSize; }

  // reading marks
  bool isMarked(HeapWord* addr) const;
  bool par_isMarked(HeapWord* addr) const; // does not do lock checks
  bool isUnmarked(HeapWord* addr) const;
  bool isAllClear() const;

  // writing marks
  void mark(HeapWord* addr);
  // For marking by parallel GC threads;
  // returns true if this thread marked it, false if another thread did
  bool par_mark(HeapWord* addr);

  void mark_range(MemRegion mr);
  void par_mark_range(MemRegion mr);
  void mark_large_range(MemRegion mr);
  void par_mark_large_range(MemRegion mr);
  void par_clear(HeapWord* addr); // For unmarking by parallel GC threads.
  void clear_range(MemRegion mr);
  void par_clear_range(MemRegion mr);
  void clear_large_range(MemRegion mr);
  void par_clear_large_range(MemRegion mr);
  void clear_all();
  void clear_all_incrementally();  // Not yet implemented!!

  NOT_PRODUCT(
    // checks the memory region for validity
    void region_invariant(MemRegion mr);
  )

  // iteration
  void iterate(BitMapClosure* cl) {
    _bm.iterate(cl);
  }
  void iterate(BitMapClosure* cl, HeapWord* left, HeapWord* right);
  void dirty_range_iterate_clear(MemRegionClosure* cl);
  void dirty_range_iterate_clear(MemRegion mr, MemRegionClosure* cl);

  // auxiliary support for iteration
  HeapWord* getNextMarkedWordAddress(HeapWord* addr) const;
  HeapWord* getNextMarkedWordAddress(HeapWord* start_addr,
                                     HeapWord* end_addr) const;
  HeapWord* getNextUnmarkedWordAddress(HeapWord* addr) const;
  HeapWord* getNextUnmarkedWordAddress(HeapWord* start_addr,
                                       HeapWord* end_addr) const;
  MemRegion getAndClearMarkedRegion(HeapWord* addr);
  MemRegion getAndClearMarkedRegion(HeapWord* start_addr,
                                    HeapWord* end_addr);

  // conversion utilities
  HeapWord* offsetToHeapWord(size_t offset) const;
  size_t    heapWordToOffset(HeapWord* addr) const;
  size_t    heapWordDiffToOffsetDiff(size_t diff) const;

  void print_on_error(outputStream* st, const char* prefix) const;

  // debugging
  // is this address range covered by the bit-map?
  NOT_PRODUCT(
    bool covers(MemRegion mr) const;
    bool covers(HeapWord* start, size_t size = 0) const;
  )
  void verifyNoOneBitsInRange(HeapWord* left, HeapWord* right) PRODUCT_RETURN;
};

// Represents a marking stack used by the CMS collector.
// Ideally this should be GrowableArray<> just like MSC's marking stack(s).
class CMSMarkStack: public CHeapObj<mtGC>  {
  friend class CMSCollector;   // To get at expansion stats further below.

  VirtualSpace _virtual_space;  // Space for the stack
  oop*   _base;      // Bottom of stack
  size_t _index;     // One more than last occupied index
  size_t _capacity;  // Max #elements
  Mutex  _par_lock;  // An advisory lock used in case of parallel access
  NOT_PRODUCT(size_t _max_depth;)  // Max depth reached during run

 protected:
  size_t _hit_limit;      // We hit max stack size limit
  size_t _failed_double;  // We failed expansion before hitting limit

 public:
  CMSMarkStack():
    _par_lock(Mutex::event, "CMSMarkStack._par_lock", true),
    _hit_limit(0),
    _failed_double(0) {}

  bool allocate(size_t size);

  size_t capacity() const { return _capacity; }

  oop pop() {
    if (!isEmpty()) {
      return _base[--_index];
    }
    return NULL;
  }

  bool push(oop ptr) {
    if (isFull()) {
      return false;
    } else {
      _base[_index++] = ptr;
      NOT_PRODUCT(_max_depth = MAX2(_max_depth, _index));
      return true;
    }
  }

  bool isEmpty() const { return _index == 0; }
  bool isFull()  const {
    assert(_index <= _capacity, "buffer overflow");
    return _index == _capacity;
  }

  size_t length() { return _index; }

  // "Parallel versions" of some of the above
  oop par_pop() {
    // lock and pop
    MutexLockerEx x(&_par_lock, Mutex::_no_safepoint_check_flag);
    return pop();
  }

  bool par_push(oop ptr) {
    // lock and push
    MutexLockerEx x(&_par_lock, Mutex::_no_safepoint_check_flag);
    return push(ptr);
  }

  // Forcibly reset the stack, losing all of its contents.
  void reset() {
    _index = 0;
  }

  // Expand the stack, typically in response to an overflow condition.
  void expand();

  // Compute the least valued stack element.
  oop least_value(HeapWord* low) {
    oop least = (oop)low;
    for (size_t i = 0; i < _index; i++) {
      least = MIN2(least, _base[i]);
    }
    return least;
  }

  // Exposed here to allow stack expansion in the parallel case.
  Mutex* par_lock() { return &_par_lock; }
};
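
// A hedged usage sketch of the stack above (caller-side pattern only;
// `stack_size` stands in for whatever capacity the caller chooses):
//
//   CMSMarkStack stk;
//   if (!stk.allocate(stack_size)) {
//     ... report failure; marking must fall back to other means ...
//   }
//   if (!stk.push(obj)) {
//     ... stack full: record the overflow, possibly expand() and retry ...
//   }
//   while (!stk.isEmpty()) {
//     oop o = stk.pop();
//     ... trace o ...
//   }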

class CardTableRS;
class CMSParGCThreadState;

class ModUnionClosure: public MemRegionClosure {
 protected:
  CMSBitMap* _t;
 public:
  ModUnionClosure(CMSBitMap* t): _t(t) { }
  void do_MemRegion(MemRegion mr);
};

class ModUnionClosurePar: public ModUnionClosure {
 public:
  ModUnionClosurePar(CMSBitMap* t): ModUnionClosure(t) { }
  void do_MemRegion(MemRegion mr);
};

// Survivor Chunk Array in support of parallelization of
// Survivor Space rescan.
class ChunkArray: public CHeapObj<mtGC> {
  size_t _index;
  size_t _capacity;
  size_t _overflows;
  HeapWord** _array;   // storage for array

 public:
  ChunkArray() : _index(0), _capacity(0), _overflows(0), _array(NULL) {}
  ChunkArray(HeapWord** a, size_t c):
    _index(0), _capacity(c), _overflows(0), _array(a) {}

  HeapWord** array() { return _array; }
  void set_array(HeapWord** a) { _array = a; }

  size_t capacity() { return _capacity; }
  void set_capacity(size_t c) { _capacity = c; }

  size_t end() {
    assert(_index <= capacity(),
           err_msg("_index (" SIZE_FORMAT ") > _capacity (" SIZE_FORMAT "): out of bounds",
                   _index, _capacity));
    return _index;
  }  // exclusive

  HeapWord* nth(size_t n) {
    assert(n < end(), "Out of bounds access");
    return _array[n];
  }

  void reset() {
    _index = 0;
    if (_overflows > 0 && PrintCMSStatistics > 1) {
      warning("CMS: ChunkArray[" SIZE_FORMAT "] overflowed " SIZE_FORMAT " times",
              _capacity, _overflows);
    }
    _overflows = 0;
  }

  void record_sample(HeapWord* p, size_t sz) {
    // For now we do not do anything with the size
    if (_index < _capacity) {
      _array[_index++] = p;
    } else {
      ++_overflows;
      assert(_index == _capacity,
             err_msg("_index (" SIZE_FORMAT ") > _capacity (" SIZE_FORMAT
                     "): out of bounds at overflow#" SIZE_FORMAT,
                     _index, _capacity, _overflows));
    }
  }
};
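
// Illustrative sampling pattern for ChunkArray (a hedged sketch; `storage`
// and `cap` are hypothetical caller-provided values):
//
//   ChunkArray ca(storage, cap);
//   ca.record_sample(chunk_boundary, 0);       // size is currently unused
//   for (size_t i = 0; i < ca.end(); i++) {    // end() is exclusive
//     HeapWord* p = ca.nth(i);
//     ... hand [p, next boundary) to a worker ...
//   }
//   ca.reset();   // rewinds the index and reports any overflows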

//
// Timing, allocation and promotion statistics for gc scheduling and incremental
// mode pacing.  Most statistics are exponential averages.
//
class CMSStats VALUE_OBJ_CLASS_SPEC {
 private:
  ConcurrentMarkSweepGeneration* const _cms_gen;   // The cms (old) gen.

  // The following are exponential averages with factor alpha (0-100):
  //   avg = ((100 - alpha) * avg + alpha * cur_sample) / 100
  //
  //   The durations measure:  end_time[n] - start_time[n]
  //   The periods measure:    start_time[n] - start_time[n-1]
  //
  // The cms period and duration include only concurrent collections; time spent
  // in foreground cms collections due to System.gc() or because of a failure to
  // keep up is not included.
  //
  // There are 3 alphas to "bootstrap" the statistics.  The _saved_alpha is the
  // real value, but is used only after the first period.  A value of 100 is
  // used for the first sample so it gets the entire weight.
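  //
  // Worked example of the update above (hypothetical numbers): with
  // alpha == 25, a previous avg of 4.0s and a new 8.0s sample,
  //   avg = ((100 - 25) * 4.0 + 25 * 8.0) / 100 = 5.0s
  // For the very first sample alpha == 100, so avg == cur_sample exactly.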
  unsigned int _saved_alpha; // 0-100
  unsigned int _gc0_alpha;
  unsigned int _cms_alpha;

  double _gc0_duration;
  double _gc0_period;
  size_t _gc0_promoted;         // bytes promoted per gc0
  double _cms_duration;
  double _cms_duration_pre_sweep; // time from initiation to start of sweep
  double _cms_period;
  size_t _cms_allocated;        // bytes of direct allocation per gc0 period

  // Timers.
  elapsedTimer _cms_timer;
  TimeStamp    _gc0_begin_time;
  TimeStamp    _cms_begin_time;
  TimeStamp    _cms_end_time;

  // Snapshots of the amount used in the CMS generation.
  size_t _cms_used_at_gc0_begin;
  size_t _cms_used_at_gc0_end;
  size_t _cms_used_at_cms_begin;

  // Used to prevent the duty cycle from being reduced in the middle of a cms
  // cycle.
  bool _allow_duty_cycle_reduction;

  enum {
    _GC0_VALID = 0x1,
    _CMS_VALID = 0x2,
    _ALL_VALID = _GC0_VALID | _CMS_VALID
  };

  unsigned int _valid_bits;

 protected:
  // In support of adjusting of cms trigger ratios based on history
  // of concurrent mode failure.
  double cms_free_adjustment_factor(size_t free) const;
  void   adjust_cms_free_adjustment_factor(bool fail, size_t free);

 public:
  CMSStats(ConcurrentMarkSweepGeneration* cms_gen,
           unsigned int alpha = CMSExpAvgFactor);

  // Whether or not the statistics contain valid data; higher level statistics
  // cannot be called until this returns true (they require at least one young
  // gen and one cms cycle to have completed).
  bool valid() const;

  // Record statistics.
  void record_gc0_begin();
  void record_gc0_end(size_t cms_gen_bytes_used);
  void record_cms_begin();
  void record_cms_end();

  // Allow management of the cms timer, which must be stopped/started around
  // yield points.
  elapsedTimer& cms_timer()     { return _cms_timer; }
  void start_cms_timer()        { _cms_timer.start(); }
  void stop_cms_timer()         { _cms_timer.stop(); }

  // Basic statistics; units are seconds or bytes.
  double gc0_period() const     { return _gc0_period; }
  double gc0_duration() const   { return _gc0_duration; }
  size_t gc0_promoted() const   { return _gc0_promoted; }
  double cms_period() const     { return _cms_period; }
  double cms_duration() const   { return _cms_duration; }
  size_t cms_allocated() const  { return _cms_allocated; }

  size_t cms_used_at_gc0_end() const { return _cms_used_at_gc0_end; }

  // Seconds since the last background cms cycle began or ended.
  double cms_time_since_begin() const;
  double cms_time_since_end() const;

  // Higher level statistics--caller must check that valid() returns true before
  // calling.

  // Returns bytes promoted per second of wall clock time.
  double promotion_rate() const;

  // Returns bytes directly allocated per second of wall clock time.
  double cms_allocation_rate() const;

  // Rate at which space in the cms generation is being consumed (sum of the
  // above two).
  double cms_consumption_rate() const;

  // Returns an estimate of the number of seconds until the cms generation will
  // fill up, assuming no collection work is done.
  double time_until_cms_gen_full() const;
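  // e.g. (hypothetical numbers): promotion_rate() == 2 MB/s plus
  // cms_allocation_rate() == 1 MB/s gives cms_consumption_rate() == 3 MB/s;
  // with 300 MB free in the generation, time_until_cms_gen_full() would be
  // roughly 100 seconds.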

  // Returns an estimate of the number of seconds remaining until
  // the cms generation collection should start.
  double time_until_cms_start() const;

  // End of higher level statistics.

  // Debugging.
  void print_on(outputStream* st) const PRODUCT_RETURN;
  void print() const { print_on(gclog_or_tty); }
};

// A closure related to weak references processing which
// we embed in the CMSCollector, since we need to pass
// it to the reference processor for secondary filtering
// of references based on reachability of referent;
// see role of _is_alive_non_header closure in the
// ReferenceProcessor class.
// For objects in the CMS generation, this closure checks
// if the object is "live" (reachable). Used in weak
// reference processing.
class CMSIsAliveClosure: public BoolObjectClosure {
  const MemRegion  _span;
  const CMSBitMap* _bit_map;

  friend class CMSCollector;
 public:
  CMSIsAliveClosure(MemRegion span,
                    CMSBitMap* bit_map):
    _span(span),
    _bit_map(bit_map) {
    assert(!span.is_empty(), "Empty span could spell trouble");
  }

  bool do_object_b(oop obj);
};


// Implements AbstractRefProcTaskExecutor for CMS.
class CMSRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
public:

  CMSRefProcTaskExecutor(CMSCollector& collector)
    : _collector(collector)
  { }

  // Executes a task using worker threads.
  virtual void execute(ProcessTask& task);
  virtual void execute(EnqueueTask& task);
private:
  CMSCollector& _collector;
};


class CMSCollector: public CHeapObj<mtGC> {
  friend class VMStructs;
  friend class ConcurrentMarkSweepThread;
  friend class ConcurrentMarkSweepGeneration;
  friend class CompactibleFreeListSpace;
  friend class CMSParMarkTask;
  friend class CMSParInitialMarkTask;
  friend class CMSParRemarkTask;
  friend class CMSConcMarkingTask;
  friend class CMSRefProcTaskProxy;
  friend class CMSRefProcTaskExecutor;
  friend class ScanMarkedObjectsAgainCarefullyClosure;  // for sampling eden
  friend class SurvivorSpacePrecleanClosure;            // -- ditto --
  friend class PushOrMarkClosure;             // to access _restart_addr
  friend class Par_PushOrMarkClosure;         // -- ditto --
  friend class MarkFromRootsClosure;          // -- ditto --
                                              // ... and for clearing cards
  friend class Par_MarkFromRootsClosure;      // to access _restart_addr
                                              // ... and for clearing cards
  friend class Par_ConcMarkingClosure;        // to access _restart_addr etc.
  friend class MarkFromRootsVerifyClosure;    // to access _restart_addr
  friend class PushAndMarkVerifyClosure;      // -- ditto --
  friend class MarkRefsIntoAndScanClosure;    // to access _overflow_list
  friend class PushAndMarkClosure;            // -- ditto --
  friend class Par_PushAndMarkClosure;        // -- ditto --
  friend class CMSKeepAliveClosure;           // -- ditto --
  friend class CMSDrainMarkingStackClosure;   // -- ditto --
  friend class CMSInnerParMarkAndPushClosure; // -- ditto --
  NOT_PRODUCT(friend class ScanMarkedObjectsAgainClosure;) // assertion on _overflow_list
  friend class ReleaseForegroundGC;  // to access _foregroundGCShouldWait
  friend class VM_CMS_Operation;
  friend class VM_CMS_Initial_Mark;
  friend class VM_CMS_Final_Remark;
  friend class TraceCMSMemoryManagerStats;

 private:
  jlong _time_of_last_gc;
  void update_time_of_last_gc(jlong now) {
    _time_of_last_gc = now;
  }

  OopTaskQueueSet* _task_queues;

  // Overflow list of grey objects, threaded through mark-word.
  // Manipulated with CAS in the parallel/multi-threaded case.
  oop _overflow_list;
  // The following array-pair keeps track of mark words
  // displaced for accommodating overflow list above.
  // This code will likely be revisited under RFE#4922830.
  Stack<oop, mtGC>     _preserved_oop_stack;
  Stack<markOop, mtGC> _preserved_mark_stack;
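
  // Hedged sketch of the threading scheme just described (illustrative
  // pseudo-code only; the real push/pop routines appear further below):
  //
  //   void push(oop p) {
  //     preserve_mark_if_necessary(p);        // save a non-trivial mark word
  //     p->set_mark((markOop)_overflow_list); // thread p through its mark
  //     _overflow_list = p;                   // CAS'd in the parallel case
  //   }
  //
  // Popping reverses the threading and restores any preserved mark.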

  int*             _hash_seed;

  // In support of multi-threaded concurrent phases
  YieldingFlexibleWorkGang* _conc_workers;

  // Performance Counters
  CollectorCounters* _gc_counters;

  // Initialization Errors
  bool _completed_initialization;

  // In support of ExplicitGCInvokesConcurrent
  static bool _full_gc_requested;
  static GCCause::Cause _full_gc_cause;
  unsigned int _collection_count_start;

  // Should we unload classes this concurrent cycle?
  bool _should_unload_classes;
  unsigned int  _concurrent_cycles_since_last_unload;
  unsigned int concurrent_cycles_since_last_unload() const {
    return _concurrent_cycles_since_last_unload;
  }
  // Did we (allow) unload classes in the previous concurrent cycle?
  bool unloaded_classes_last_cycle() const {
    return concurrent_cycles_since_last_unload() == 0;
  }
  // Root scanning options for perm gen
  int _roots_scanning_options;
  int roots_scanning_options() const      { return _roots_scanning_options; }
  void add_root_scanning_option(int o)    { _roots_scanning_options |= o;   }
  void remove_root_scanning_option(int o) { _roots_scanning_options &= ~o;  }

  // Verification support
  CMSBitMap     _verification_mark_bm;
  void verify_after_remark_work_1();
  void verify_after_remark_work_2();

  // True if any verification flag is on.
  bool _verifying;
  bool verifying() const { return _verifying; }
  void set_verifying(bool v) { _verifying = v; }

  // Collector policy
  ConcurrentMarkSweepPolicy* _collector_policy;
  ConcurrentMarkSweepPolicy* collector_policy() { return _collector_policy; }

  void set_did_compact(bool v);

  // XXX Move these to CMSStats ??? FIX ME !!!
  elapsedTimer _inter_sweep_timer;   // Time between sweeps
  elapsedTimer _intra_sweep_timer;   // Time _in_ sweeps
  // Padded decaying average estimates of the above
  AdaptivePaddedAverage _inter_sweep_estimate;
  AdaptivePaddedAverage _intra_sweep_estimate;

  CMSTracer* _gc_tracer_cm;
  ConcurrentGCTimer* _gc_timer_cm;

  bool _cms_start_registered;

  GCHeapSummary _last_heap_summary;
  MetaspaceSummary _last_metaspace_summary;

  void register_foreground_gc_start(GCCause::Cause cause);
  void register_gc_start(GCCause::Cause cause);
  void register_gc_end();
  void save_heap_summary();
  void report_heap_summary(GCWhen::Type when);

 protected:
  ConcurrentMarkSweepGeneration* _cmsGen;  // Old gen (CMS)
  MemRegion                      _span;    // Span covering above two
  CardTableRS*                   _ct;      // Card table

  // CMS marking support structures
  CMSBitMap     _markBitMap;
  CMSBitMap     _modUnionTable;
  CMSMarkStack  _markStack;

  HeapWord*     _restart_addr; // In support of marking stack overflow
  void          lower_restart_addr(HeapWord* low);

  // Counters in support of marking stack / work queue overflow handling:
  // a non-zero value indicates certain types of overflow events during
  // the current CMS cycle and could lead to stack resizing efforts at
  // an opportune future time.
  size_t        _ser_pmc_preclean_ovflw;
  size_t        _ser_pmc_remark_ovflw;
  size_t        _par_pmc_remark_ovflw;
  size_t        _ser_kac_preclean_ovflw;
  size_t        _ser_kac_ovflw;
  size_t        _par_kac_ovflw;
  NOT_PRODUCT(ssize_t _num_par_pushes;)

  // ("Weak") Reference processing support.
  ReferenceProcessor*            _ref_processor;
  CMSIsAliveClosure              _is_alive_closure;
  // Keep this textually after _markBitMap and _span; c'tor dependency.

  ConcurrentMarkSweepThread*     _cmsThread;   // The thread doing the work
  ModUnionClosure    _modUnionClosure;
  ModUnionClosurePar _modUnionClosurePar;

  // CMS abstract state machine
  // initial_state: Idling
  // next_state(Idling)            = {Marking}
  // next_state(Marking)           = {Precleaning, Sweeping}
  // next_state(Precleaning)       = {AbortablePreclean, FinalMarking}
  // next_state(AbortablePreclean) = {FinalMarking}
  // next_state(FinalMarking)      = {Sweeping}
  // next_state(Sweeping)          = {Resizing}
  // next_state(Resizing)          = {Resetting}
  // next_state(Resetting)         = {Idling}
  // The numeric values below are chosen so that:
  // . _collectorState <= Idling == post-sweep && pre-mark
  // . _collectorState in (Idling, Sweeping) == {initial,final}marking ||
  //                                            precleaning || abortablePreclean
 public:
  enum CollectorState {
    Resizing            = 0,
    Resetting           = 1,
    Idling              = 2,
    InitialMarking      = 3,
    Marking             = 4,
    Precleaning         = 5,
    AbortablePreclean   = 6,
    FinalMarking        = 7,
    Sweeping            = 8
  };
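
  // e.g. (illustrative) the ordering above lets callers test for a cycle in
  // progress between initial mark and the end of sweep with just
  //   CMSCollector::abstract_state() > Idling
  // while Resizing/Resetting (<= Idling) are the post-sweep, pre-mark states.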
 protected:
  static CollectorState _collectorState;

  // State related to prologue/epilogue invocation for my generations
  bool _between_prologue_and_epilogue;

  // Signaling/State related to coordination between fore- and background GC
  // Note: When the baton has been passed from background GC to foreground GC,
  // _foregroundGCIsActive is true and _foregroundGCShouldWait is false.
  static bool _foregroundGCIsActive;    // true iff foreground collector is active or
                                        // wants to go active
  static bool _foregroundGCShouldWait;  // true iff background GC is active and has not
                                        // yet passed the baton to the foreground GC
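
  // Baton-passing sketch (hedged; the names are the two flags above):
  //   background GC: keeps _foregroundGCShouldWait set while it holds the
  //                  baton; clears it when yielding to a foreground GC.
  //   foreground GC: sets _foregroundGCIsActive, waits until
  //                  !_foregroundGCShouldWait, then performs the collection.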

  // Support for CMSScheduleRemark (abortable preclean)
  bool _abort_preclean;
  bool _start_sampling;

  int    _numYields;
  size_t _numDirtyCards;
  size_t _sweep_count;
  // Number of full gc's since the last concurrent gc.
  uint   _full_gcs_since_conc_gc;

  // Occupancy used for bootstrapping stats
  double _bootstrap_occupancy;

  // Timer
  elapsedTimer _timer;

  // Timing, allocation and promotion statistics, used for scheduling.
  CMSStats      _stats;

  enum CMS_op_type {
    CMS_op_checkpointRootsInitial,
    CMS_op_checkpointRootsFinal
  };

  void do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause);
  bool stop_world_and_do(CMS_op_type op);

  OopTaskQueueSet* task_queues() { return _task_queues; }
  int*             hash_seed(int i) { return &_hash_seed[i]; }
  YieldingFlexibleWorkGang* conc_workers() { return _conc_workers; }

  // Support for parallelizing Eden rescan in CMS remark phase
  void sample_eden(); // ... sample Eden space top

 private:
  // Support for parallelizing young gen rescan in CMS remark phase
  Generation* _young_gen;  // the younger gen
  HeapWord** _top_addr;    // ... Top of Eden
  HeapWord** _end_addr;    // ... End of Eden
  Mutex*     _eden_chunk_lock;
  HeapWord** _eden_chunk_array; // ... Eden partitioning array
  size_t     _eden_chunk_index; // ... top (exclusive) of array
  size_t     _eden_chunk_capacity;  // ... max entries in array

  // Support for parallelizing survivor space rescan
  HeapWord** _survivor_chunk_array;
  size_t     _survivor_chunk_index;
  size_t     _survivor_chunk_capacity;
  size_t*    _cursor;
  ChunkArray* _survivor_plab_array;

  // Support for marking stack overflow handling
  bool take_from_overflow_list(size_t num, CMSMarkStack* to_stack);
  bool par_take_from_overflow_list(size_t num,
                                   OopTaskQueue* to_work_q,
                                   int no_of_gc_threads);
  void push_on_overflow_list(oop p);
  void par_push_on_overflow_list(oop p);
  // The following is not, in general, "MT-stable".
  bool overflow_list_is_empty() const;

  void preserve_mark_if_necessary(oop p);
  void par_preserve_mark_if_necessary(oop p);
  void preserve_mark_work(oop p, markOop m);
  void restore_preserved_marks_if_any();
  NOT_PRODUCT(bool no_preserved_marks() const;)
  // In support of testing overflow code
  NOT_PRODUCT(int _overflow_counter;)
  NOT_PRODUCT(bool simulate_overflow();)       // Sequential
  NOT_PRODUCT(bool par_simulate_overflow();)   // MT version

  // CMS work methods
  void checkpointRootsInitialWork(bool asynch); // Initial checkpoint work

  // A return value of false indicates failure due to stack overflow
  bool markFromRootsWork(bool asynch);  // Concurrent marking work

 public:   // FIX ME!!! only for testing
  bool do_marking_st(bool asynch);      // Single-threaded marking
  bool do_marking_mt(bool asynch);      // Multi-threaded  marking

 private:

  // Concurrent precleaning work
  size_t preclean_mod_union_table(ConcurrentMarkSweepGeneration* gen,
                                  ScanMarkedObjectsAgainCarefullyClosure* cl);
  size_t preclean_card_table(ConcurrentMarkSweepGeneration* gen,
                             ScanMarkedObjectsAgainCarefullyClosure* cl);
  // Does precleaning work, returning a quantity indicative of
  // the amount of "useful work" done.
  size_t preclean_work(bool clean_refs, bool clean_survivors);
  void preclean_klasses(MarkRefsIntoAndScanClosure* cl, Mutex* freelistLock);
  void abortable_preclean(); // Preclean while looking for possible abort
  void initialize_sequential_subtasks_for_young_gen_rescan(int i);
  // Helper function for above; merge-sorts the per-thread plab samples
  void merge_survivor_plab_arrays(ContiguousSpace* surv, int no_of_gc_threads);
  // Resets (i.e. clears) the per-thread plab sample vectors
  void reset_survivor_plab_arrays();

  // Final (second) checkpoint work
  void checkpointRootsFinalWork(bool asynch, bool clear_all_soft_refs,
                                bool init_mark_was_synchronous);
  // Work routine for parallel version of remark
  void do_remark_parallel();
  // Work routine for non-parallel version of remark
  void do_remark_non_parallel();
  // Reference processing work routine (during second checkpoint)
  void refProcessingWork(bool asynch, bool clear_all_soft_refs);

  // Concurrent sweeping work
  void sweepWork(ConcurrentMarkSweepGeneration* gen, bool asynch);

  // (Concurrent) resetting of support data structures
  void reset(bool asynch);

  // Clear _expansion_cause fields of constituent generations
  void clear_expansion_cause();

  // An auxiliary method used to record the ends of
  // used regions of each generation to limit the extent of sweep
  void save_sweep_limits();

  // A work method used by foreground collection to determine
  // what type of collection (compacting or not, continuing or fresh)
  // it should do.
  void decide_foreground_collection_type(bool clear_all_soft_refs,
    bool* should_compact, bool* should_start_over);

  // A work method used by the foreground collector to do
  // a mark-sweep-compact.
  void do_compaction_work(bool clear_all_soft_refs);

  // A work method used by the foreground collector to do
  // a mark-sweep, after taking over from a possibly on-going
  // concurrent mark-sweep collection.
  void do_mark_sweep_work(bool clear_all_soft_refs,
    CollectorState first_state, bool should_start_over);

  // Work methods for reporting concurrent mode interruption or failure
  bool is_external_interruption();
  void report_concurrent_mode_interruption();

  // If the background GC is active, acquire control from the background
  // GC and do the collection.
  void acquire_control_and_collect(bool full, bool clear_all_soft_refs);

  // For synchronizing passing of control from background to foreground
  // GC.  waitForForegroundGC() is called by the background
  // collector.  If it had to wait for a foreground collection,
  // it returns true and the background collection should assume
  // that the collection was finished by the foreground
  // collector.
  bool waitForForegroundGC();

  size_t block_size_using_printezis_bits(HeapWord* addr) const;
  size_t block_size_if_printezis_bits(HeapWord* addr) const;
  HeapWord* next_card_start_after_block(HeapWord* addr) const;

  void setup_cms_unloading_and_verification_state();
 public:
  CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
               CardTableRS*                   ct,
               ConcurrentMarkSweepPolicy*     cp);
  ConcurrentMarkSweepThread* cmsThread() { return _cmsThread; }

  ReferenceProcessor* ref_processor() { return _ref_processor; }
  void ref_processor_init();

  Mutex* bitMapLock()        const { return _markBitMap.lock();    }
  static CollectorState abstract_state() { return _collectorState;  }

  bool should_abort_preclean() const; // Whether preclean should be aborted.
  size_t get_eden_used() const;
  size_t get_eden_capacity() const;

  ConcurrentMarkSweepGeneration* cmsGen() { return _cmsGen; }

  // Locking checks
  NOT_PRODUCT(static bool have_cms_token();)

  // XXXPERM bool should_collect(bool full, size_t size, bool tlab);
  bool shouldConcurrentCollect();

  void collect(bool   full,
               bool   clear_all_soft_refs,
               size_t size,
               bool   tlab);
  void collect_in_background(bool clear_all_soft_refs, GCCause::Cause cause);
  void collect_in_foreground(bool clear_all_soft_refs, GCCause::Cause cause);

  // In support of ExplicitGCInvokesConcurrent
  static void request_full_gc(unsigned int full_gc_count, GCCause::Cause cause);
  // Should we unload classes in a particular concurrent cycle?
  bool should_unload_classes() const {
    return _should_unload_classes;
  }
  void update_should_unload_classes();

  void direct_allocated(HeapWord* start, size_t size);

  // Object is dead if not marked and current phase is sweeping.
  bool is_dead_obj(oop obj) const;

  // After a promotion (of "start"), do any necessary marking.
  // If "par", then it's being done by a parallel GC thread.
  // The last two args indicate if we need precise marking
  // and if so the size of the object so it can be dirtied
  // in its entirety.
  void promoted(bool par, HeapWord* start,
                bool is_obj_array, size_t obj_size);
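
  // e.g. (illustrative call only): a parallel GC thread that has just
  // promoted an object array starting at `start` would call
  //   promoted(true, start, true, arr->size());
  // so that the whole array can be dirtied precisely.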

  void getFreelistLocks() const;
  void releaseFreelistLocks() const;
  bool haveFreelistLocks() const;

  // Adjust size of underlying generation
  void compute_new_size();

  // GC prologue and epilogue
  void gc_prologue(bool full);
  void gc_epilogue(bool full);

  jlong time_of_last_gc(jlong now) {
    if (_collectorState <= Idling) {
      // gc not in progress
      return _time_of_last_gc;
    } else {
      // collection in progress
      return now;
    }
  }

  // Support for parallel remark of survivor space
  void* get_data_recorder(int thr_num);
  void sample_eden_chunk();

  CMSBitMap* markBitMap()  { return &_markBitMap; }
  void directAllocated(HeapWord* start, size_t size);

  // Main CMS steps and related support
  void checkpointRootsInitial(bool asynch);
  bool markFromRoots(bool asynch);  // a return value of false indicates failure
                                    // due to stack overflow
  void preclean();
  void checkpointRootsFinal(bool asynch, bool clear_all_soft_refs,
                            bool init_mark_was_synchronous);
  void sweep(bool asynch);

  // Check that the currently executing thread is the expected
  // one (foreground collector or background collector).
  static void check_correct_thread_executing() PRODUCT_RETURN;
  // XXXPERM void print_statistics()           PRODUCT_RETURN;

  bool is_cms_reachable(HeapWord* addr);

  // Performance Counter Support
  CollectorCounters* counters()    { return _gc_counters; }

  // Timer stuff
  void    startTimer() { assert(!_timer.is_active(), "Error"); _timer.start();   }
  void    stopTimer()  { assert( _timer.is_active(), "Error"); _timer.stop();    }
  void    resetTimer() { assert(!_timer.is_active(), "Error"); _timer.reset();   }
  double  timerValue() { assert(!_timer.is_active(), "Error"); return _timer.seconds(); }

  int  yields()          { return _numYields; }
  void resetYields()     { _numYields = 0;    }
  void incrementYields() { _numYields++;      }
  void resetNumDirtyCards()               { _numDirtyCards = 0; }
  void incrementNumDirtyCards(size_t num) { _numDirtyCards += num; }
  size_t  numDirtyCards()                 { return _numDirtyCards; }

  static bool foregroundGCShouldWait() { return _foregroundGCShouldWait; }
  static void set_foregroundGCShouldWait(bool v) { _foregroundGCShouldWait = v; }
  static bool foregroundGCIsActive() { return _foregroundGCIsActive; }
  static void set_foregroundGCIsActive(bool v) { _foregroundGCIsActive = v; }
  size_t sweep_count() const             { return _sweep_count; }
  void   increment_sweep_count()         { _sweep_count++; }

  // Timers/stats for gc scheduling and incremental mode pacing.
  CMSStats& stats() { return _stats; }

  // Adaptive size policy
  AdaptiveSizePolicy* size_policy();

  static void print_on_error(outputStream* st);

  // Debugging
  void verify();
  bool verify_after_remark(bool silent = VerifySilently);
  void verify_ok_to_terminate() const PRODUCT_RETURN;
  void verify_work_stacks_empty() const PRODUCT_RETURN;
  void verify_overflow_empty() const PRODUCT_RETURN;

  // Convenience methods in support of debugging
  static const size_t skip_header_HeapWords() PRODUCT_RETURN0;
  HeapWord* block_start(const void* p) const PRODUCT_RETURN0;

  // Accessors
  CMSMarkStack* verification_mark_stack() { return &_markStack; }
  CMSBitMap*    verification_mark_bm()    { return &_verification_mark_bm; }

  // Initialization errors
  bool completed_initialization() { return _completed_initialization; }

  void print_eden_and_survivor_chunk_arrays();
};

class CMSExpansionCause : public AllStatic  {
 public:
  enum Cause {
    _no_expansion,
    _satisfy_free_ratio,
    _satisfy_promotion,
    _satisfy_allocation,
    _allocate_par_lab,
    _allocate_par_spooling_space,
    _adaptive_size_policy
  };
  // Return a string describing the cause of the expansion.
  static const char* to_string(CMSExpansionCause::Cause cause);
};

class ConcurrentMarkSweepGeneration: public CardGeneration {
  friend class VMStructs;
  friend class ConcurrentMarkSweepThread;
  friend class ConcurrentMarkSweep;
  friend class CMSCollector;
 protected:
  static CMSCollector*       _collector; // the collector that collects us
  CompactibleFreeListSpace*  _cmsSpace;  // underlying space (only one for now)

  // Performance Counters
  GenerationCounters*      _gen_counters;
  GSpaceCounters*          _space_counters;

  // Words directly allocated, used by CMSStats.
  size_t _direct_allocated_words;

  // Non-product stat counters
  NOT_PRODUCT(
    size_t _numObjectsPromoted;
    size_t _numWordsPromoted;
    size_t _numObjectsAllocated;
    size_t _numWordsAllocated;
  )

  // Used for sizing decisions
  bool _incremental_collection_failed;
  bool incremental_collection_failed() {
    return _incremental_collection_failed;
  }
  void set_incremental_collection_failed() {
    _incremental_collection_failed = true;
  }
  void clear_incremental_collection_failed() {
    _incremental_collection_failed = false;
  }

  // accessors
  void set_expansion_cause(CMSExpansionCause::Cause v) { _expansion_cause = v; }
  CMSExpansionCause::Cause expansion_cause() const { return _expansion_cause; }

 private:
  // For parallel young-gen GC support.
  CMSParGCThreadState** _par_gc_thread_states;

  // Reason generation was expanded
  CMSExpansionCause::Cause _expansion_cause;

  // In support of MinChunkSize being larger than min object size
  const double _dilatation_factor;

  enum CollectionTypes {
    Concurrent_collection_type          = 0,
    MS_foreground_collection_type       = 1,
    MSC_foreground_collection_type      = 2,
    Unknown_collection_type             = 3
  };

  CollectionTypes _debug_collection_type;

  // True if a compacting collection was done.
  bool _did_compact;
  bool did_compact() { return _did_compact; }

  // Fraction of current occupancy at which to start a CMS collection which
  // will collect this generation (at least).
  double _initiating_occupancy;

 protected:
  // Shrink the generation's free list by the specified number of bytes.
  void shrink_free_list_by(size_t bytes);

  // Update statistics for GC
  virtual void update_gc_stats(int level, bool full);

  // Maximum available space in the generation (including uncommitted)
  // space.
  size_t max_available() const;

  // getter and initializer for _initiating_occupancy field.
  double initiating_occupancy() const { return _initiating_occupancy; }
  void   init_initiating_occupancy(intx io, uintx tr);
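
  // (io is typically CMSInitiatingOccupancyFraction and tr CMSTriggerRatio;
  //  a hedged reading of the contract: a non-negative io is used directly as
  //  io/100, otherwise a default occupancy is derived from tr and
  //  MinHeapFreeRatio -- see the definition in the .cpp file.)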

 public:
  ConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size,
                                int level, CardTableRS* ct,
                                bool use_adaptive_freelists,
                                FreeBlockDictionary<FreeChunk>::DictionaryChoice);

  // Accessors
  CMSCollector* collector() const { return _collector; }
  static void set_collector(CMSCollector* collector) {
    assert(_collector == NULL, "already set");
    _collector = collector;
  }
  CompactibleFreeListSpace*  cmsSpace() const { return _cmsSpace;  }

  Mutex* freelistLock() const;

  virtual Generation::Name kind() { return Generation::ConcurrentMarkSweep; }

  void set_did_compact(bool v) { _did_compact = v; }

  bool refs_discovery_is_atomic() const { return false; }
  bool refs_discovery_is_mt()     const {
    // Note: CMS does MT-discovery during the parallel-remark
    // phases. Use ReferenceProcessorMTMutator to make refs
    // discovery MT-safe during such phases or other parallel
    // discovery phases in the future. This may all go away
    // if/when we decide that refs discovery is sufficiently
    // rare that the cost of the CAS's involved is in the
    // noise. That's a measurement that should be done, and
    // the code simplified if that turns out to be the case.
    return ConcGCThreads > 1;
  }

  // Override
  virtual void ref_processor_init();

  // Grow generation by specified size (returns false if unable to grow)
  bool grow_by(size_t bytes);
  // Grow generation to reserved size.
  bool grow_to_reserved();

  void clear_expansion_cause() { _expansion_cause = CMSExpansionCause::_no_expansion; }

  // Space enquiries
  size_t capacity() const;
  size_t used() const;
  size_t free() const;
  double occupancy() const { return ((double)used())/((double)capacity()); }
  size_t contiguous_available() const;
  size_t unsafe_max_alloc_nogc() const;

  // overrides
  MemRegion used_region() const;
  MemRegion used_region_at_save_marks() const;

  // Does a "full" (forced) collection invoked on this generation collect
  // all younger generations as well? Note that the second conjunct is a
  // hack to allow the collection of the younger gen first if the flag is
  // set.
  virtual bool full_collects_younger_generations() const {
    return UseCMSCompactAtFullCollection && !ScavengeBeforeFullGC;
  }

  void space_iterate(SpaceClosure* blk, bool usedOnly = false);

  // Support for compaction
  CompactibleSpace* first_compaction_space() const;
  // Adjust quantities in the generation affected by
  // the compaction.
  void reset_after_compaction();

  // Allocation support
  HeapWord* allocate(size_t size, bool tlab);
  HeapWord* have_lock_and_allocate(size_t size, bool tlab);
  oop       promote(oop obj, size_t obj_size);
  HeapWord* par_allocate(size_t size, bool tlab) {
    return allocate(size, tlab);
  }

  // Used by CMSStats to track direct allocation.  The value is sampled and
  // reset after each young gen collection.
  size_t direct_allocated_words() const { return _direct_allocated_words; }
  void reset_direct_allocated_words()   { _direct_allocated_words = 0; }

  // Overrides for parallel promotion.
  virtual oop par_promote(int thread_num,
                          oop obj, markOop m, size_t word_sz);
  // This one should not be called for CMS.
  virtual void par_promote_alloc_undo(int thread_num,
                                      HeapWord* obj, size_t word_sz);
  virtual void par_promote_alloc_done(int thread_num);
  virtual void par_oop_since_save_marks_iterate_done(int thread_num);

  virtual bool promotion_attempt_is_safe(size_t promotion_in_bytes) const;

  // Inform this (non-young) generation that a promotion failure was
  // encountered during a collection of a younger generation that
  // promotes into this generation.
  virtual void promotion_failure_occurred();

  bool should_collect(bool full, size_t size, bool tlab);
  virtual bool should_concurrent_collect() const;
  virtual bool is_too_full() const;
  void collect(bool   full,
               bool   clear_all_soft_refs,
               size_t size,
               bool   tlab);

  HeapWord* expand_and_allocate(size_t word_size,
                                bool tlab,
                                bool parallel = false);

  // GC prologue and epilogue
  void gc_prologue(bool full);
  void gc_prologue_work(bool full, bool registerClosure,
                        ModUnionClosure* modUnionClosure);
  void gc_epilogue(bool full);
  void gc_epilogue_work(bool full);

  // Time since last GC of this generation
  jlong time_of_last_gc(jlong now) {
    return collector()->time_of_last_gc(now);
  }
  void update_time_of_last_gc(jlong now) {
    collector()->update_time_of_last_gc(now);
  }

  // Allocation failure
  void expand(size_t bytes, size_t expand_bytes,
    CMSExpansionCause::Cause cause);
  virtual bool expand(size_t bytes, size_t expand_bytes);
  void shrink(size_t bytes);
  void shrink_by(size_t bytes);
  HeapWord* expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz);
  bool expand_and_ensure_spooling_space(PromotionInfo* promo);

  // Iteration support and related enquiries
  void save_marks();
  bool no_allocs_since_save_marks();
  void younger_refs_iterate(OopsInGenClosure* cl);

  // Iteration support specific to CMS generations
  void save_sweep_limit();

  // More iteration support
  virtual void oop_iterate(ExtendedOopClosure* cl);
  virtual void safe_object_iterate(ObjectClosure* cl);
  virtual void object_iterate(ObjectClosure* cl);

  // Need to declare the full complement of closures, whether we'll
  // override them or not, or get message from the compiler:
  //   oop_since_save_marks_iterate_nv hides virtual function...
  #define CMS_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix) \
    void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl);
  ALL_SINCE_SAVE_MARKS_CLOSURES(CMS_SINCE_SAVE_MARKS_DECL)
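
  // e.g. for OopClosureType == FooClosure and nv_suffix == _nv, the macro
  // above expands to:
  //   void oop_since_save_marks_iterate_nv(FooClosure* cl);
  // (illustrative expansion; FooClosure is a hypothetical name)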

  // Smart allocation  XXX -- move to CFLSpace?
  void setNearLargestChunk();
  bool isNearLargestChunk(HeapWord* addr);

  // Get the chunk at the end of the space.  Delegates to
  // the space.
  FreeChunk* find_chunk_at_end();

  void post_compact();

  // Debugging
  void prepare_for_verify();
  void verify();
  void print_statistics()               PRODUCT_RETURN;

  // Performance Counters support
  virtual void update_counters();
  virtual void update_counters(size_t used);
  void initialize_performance_counters();
  CollectorCounters* counters()  { return collector()->counters(); }

  // Support for parallel remark of survivor space
  void* get_data_recorder(int thr_num) {
    // Delegate to collector
    return collector()->get_data_recorder(thr_num);
  }
  void sample_eden_chunk() {
    // Delegate to collector
    collector()->sample_eden_chunk();
  }

  // Printing
  const char* name() const;
  virtual const char* short_name() const { return "CMS"; }
  void        print() const;
  void printOccupancy(const char* s);
  bool must_be_youngest() const { return false; }
  bool must_be_oldest()   const { return true; }

  // Resize the generation after a compacting GC.  The
  // generation can be treated as a contiguous space
  // after the compaction.
  virtual void compute_new_size();
  // Resize the generation after a non-compacting
  // collection.
  void compute_new_size_free_list();

  CollectionTypes debug_collection_type() { return _debug_collection_type; }
  void rotate_debug_collection_type();
};
1302 
1303 //
1304 // Closures of various sorts used by CMS to accomplish its work
1305 //
1306 
1307 // This closure is used to do concurrent marking from the roots
1308 // following the first checkpoint.
1309 class MarkFromRootsClosure: public BitMapClosure {
1310   CMSCollector*  _collector;
1311   MemRegion      _span;
1312   CMSBitMap*     _bitMap;
1313   CMSBitMap*     _mut;
1314   CMSMarkStack*  _markStack;
1315   bool           _yield;
1316   int            _skipBits;
1317   HeapWord*      _finger;
1318   HeapWord*      _threshold;
1319   DEBUG_ONLY(bool _verifying;)
1320 
1321  public:
1322   MarkFromRootsClosure(CMSCollector* collector, MemRegion span,
1323                        CMSBitMap* bitMap,
1324                        CMSMarkStack*  markStack,
1325                        bool should_yield, bool verifying = false);
1326   bool do_bit(size_t offset);
1327   void reset(HeapWord* addr);
1328   inline void do_yield_check();
1329 
1330  private:
1331   void scanOopsInOop(HeapWord* ptr);
1332   void do_yield_work();
1333 };
1334 
1335 // This closure is used to do concurrent multi-threaded
1336 // marking from the roots following the first checkpoint.
1337 // XXX This should really be a subclass of The serial version
1338 // above, but i have not had the time to refactor things cleanly.
1339 class Par_MarkFromRootsClosure: public BitMapClosure {
1340   CMSCollector*  _collector;
1341   MemRegion      _whole_span;
1342   MemRegion      _span;
1343   CMSBitMap*     _bit_map;
1344   CMSBitMap*     _mut;
1345   OopTaskQueue*  _work_queue;
1346   CMSMarkStack*  _overflow_stack;
1347   bool           _yield;
1348   int            _skip_bits;
1349   HeapWord*      _finger;
1350   HeapWord*      _threshold;
1351   CMSConcMarkingTask* _task;
1352  public:
1353   Par_MarkFromRootsClosure(CMSConcMarkingTask* task, CMSCollector* collector,
1354                        MemRegion span,
1355                        CMSBitMap* bit_map,
1356                        OopTaskQueue* work_queue,
1357                        CMSMarkStack*  overflow_stack,
1358                        bool should_yield);
1359   bool do_bit(size_t offset);
1360   inline void do_yield_check();
1361 
1362  private:
1363   void scan_oops_in_oop(HeapWord* ptr);
1364   void do_yield_work();
1365   bool get_work_from_overflow_stack();
1366 };
1367 
1368 // The following closures are used to do certain kinds of verification of
1369 // CMS marking.
1370 class PushAndMarkVerifyClosure: public MetadataAwareOopClosure {
1371   CMSCollector*    _collector;
1372   MemRegion        _span;
1373   CMSBitMap*       _verification_bm;
1374   CMSBitMap*       _cms_bm;
1375   CMSMarkStack*    _mark_stack;
1376  protected:
1377   void do_oop(oop p);
1378   template <class T> inline void do_oop_work(T *p) {
1379     oop obj = oopDesc::load_decode_heap_oop(p);
1380     do_oop(obj);
1381   }
1382  public:
1383   PushAndMarkVerifyClosure(CMSCollector* cms_collector,
1384                            MemRegion span,
1385                            CMSBitMap* verification_bm,
1386                            CMSBitMap* cms_bm,
1387                            CMSMarkStack*  mark_stack);
1388   void do_oop(oop* p);
1389   void do_oop(narrowOop* p);
1390 
1391   // Deal with a stack overflow condition
1392   void handle_stack_overflow(HeapWord* lost);
1393 };
1394 
1395 class MarkFromRootsVerifyClosure: public BitMapClosure {
1396   CMSCollector*  _collector;
1397   MemRegion      _span;
1398   CMSBitMap*     _verification_bm;
1399   CMSBitMap*     _cms_bm;
1400   CMSMarkStack*  _mark_stack;
1401   HeapWord*      _finger;
1402   PushAndMarkVerifyClosure _pam_verify_closure;
1403  public:
1404   MarkFromRootsVerifyClosure(CMSCollector* collector, MemRegion span,
1405                              CMSBitMap* verification_bm,
1406                              CMSBitMap* cms_bm,
1407                              CMSMarkStack*  mark_stack);
1408   bool do_bit(size_t offset);
1409   void reset(HeapWord* addr);
1410 };
1411 
1412 
1413 // This closure is used to check that a certain set of bits is
1414 // "empty" (i.e. the bit vector doesn't have any 1-bits).
1415 class FalseBitMapClosure: public BitMapClosure {
1416  public:
1417   bool do_bit(size_t offset) {
1418     guarantee(false, "Should not have a 1 bit");
1419     return true;
1420   }
1421 };
1422 
1423 // A version of ObjectClosure with "memory" (see _previous_address below)
1424 class UpwardsObjectClosure: public BoolObjectClosure {
1425   HeapWord* _previous_address;
1426  public:
1427   UpwardsObjectClosure() : _previous_address(NULL) { }
1428   void set_previous(HeapWord* addr) { _previous_address = addr; }
1429   HeapWord* previous()              { return _previous_address; }
1430   // A return value of "true" can be used by the caller to decide
1431   // if this object's end should *NOT* be recorded in
1432   // _previous_address above.
1433   virtual bool do_object_bm(oop obj, MemRegion mr) = 0;
1434 };
1435 
1436 // This closure is used during the second checkpointing phase
1437 // to rescan the marked objects on the dirty cards in the mod
1438 // union table and the card table proper. It's invoked via
1439 // MarkFromDirtyCardsClosure below. It uses either
1440 // [Par_]MarkRefsIntoAndScanClosure (Par_ in the parallel case)
1441 // declared in genOopClosures.hpp to accomplish some of its work.
// In the parallel case the bit map is shared, so updates to it by the
// embedded closures need to be suitably synchronized; this closure
// itself, however, only reads the bit map and, because its operation is
// idempotent, is immune to reading stale values.
1447 class ScanMarkedObjectsAgainClosure: public UpwardsObjectClosure {
1448   #ifdef ASSERT
1449     CMSCollector*          _collector;
1450     MemRegion              _span;
1451     union {
1452       CMSMarkStack*        _mark_stack;
1453       OopTaskQueue*        _work_queue;
1454     };
1455   #endif // ASSERT
1456   bool                       _parallel;
1457   CMSBitMap*                 _bit_map;
1458   union {
1459     MarkRefsIntoAndScanClosure*     _scan_closure;
1460     Par_MarkRefsIntoAndScanClosure* _par_scan_closure;
1461   };
1462 
1463  public:
1464   ScanMarkedObjectsAgainClosure(CMSCollector* collector,
1465                                 MemRegion span,
1466                                 ReferenceProcessor* rp,
1467                                 CMSBitMap* bit_map,
1468                                 CMSMarkStack*  mark_stack,
1469                                 MarkRefsIntoAndScanClosure* cl):
1470     #ifdef ASSERT
1471       _collector(collector),
1472       _span(span),
1473       _mark_stack(mark_stack),
1474     #endif // ASSERT
1475     _parallel(false),
1476     _bit_map(bit_map),
1477     _scan_closure(cl) { }
1478 
1479   ScanMarkedObjectsAgainClosure(CMSCollector* collector,
1480                                 MemRegion span,
1481                                 ReferenceProcessor* rp,
1482                                 CMSBitMap* bit_map,
1483                                 OopTaskQueue* work_queue,
1484                                 Par_MarkRefsIntoAndScanClosure* cl):
1485     #ifdef ASSERT
1486       _collector(collector),
1487       _span(span),
1488       _work_queue(work_queue),
1489     #endif // ASSERT
1490     _parallel(true),
1491     _bit_map(bit_map),
1492     _par_scan_closure(cl) { }
1493 
1494   bool do_object_b(oop obj) {
1495     guarantee(false, "Call do_object_b(oop, MemRegion) form instead");
1496     return false;
1497   }
1498   bool do_object_bm(oop p, MemRegion mr);
1499 };
1500 
1501 // This closure is used during the second checkpointing phase
1502 // to rescan the marked objects on the dirty cards in the mod
1503 // union table and the card table proper. It invokes
1504 // ScanMarkedObjectsAgainClosure above to accomplish much of its work.
1505 // In the parallel case, the bit map is shared and requires
1506 // synchronized access.
1507 class MarkFromDirtyCardsClosure: public MemRegionClosure {
1508   CompactibleFreeListSpace*      _space;
1509   ScanMarkedObjectsAgainClosure  _scan_cl;
1510   size_t                         _num_dirty_cards;
1511 
1512  public:
1513   MarkFromDirtyCardsClosure(CMSCollector* collector,
1514                             MemRegion span,
1515                             CompactibleFreeListSpace* space,
1516                             CMSBitMap* bit_map,
1517                             CMSMarkStack* mark_stack,
1518                             MarkRefsIntoAndScanClosure* cl):
    _space(space),
    _scan_cl(collector, span, collector->ref_processor(), bit_map,
             mark_stack, cl),
    _num_dirty_cards(0) { }
1523 
1524   MarkFromDirtyCardsClosure(CMSCollector* collector,
1525                             MemRegion span,
1526                             CompactibleFreeListSpace* space,
1527                             CMSBitMap* bit_map,
1528                             OopTaskQueue* work_queue,
1529                             Par_MarkRefsIntoAndScanClosure* cl):
    _space(space),
    _scan_cl(collector, span, collector->ref_processor(), bit_map,
             work_queue, cl),
    _num_dirty_cards(0) { }
1534 
1535   void do_MemRegion(MemRegion mr);
1536   void set_space(CompactibleFreeListSpace* space) { _space = space; }
1537   size_t num_dirty_cards() { return _num_dirty_cards; }
1538 };
1539 
// This closure is used in the non-product build to verify that a
// MemRegion iteration produces no regions at all: any invocation
// fails unconditionally.
class FalseMemRegionClosure: public MemRegionClosure {
  void do_MemRegion(MemRegion mr) {
    guarantee(false, "Should never be here");
  }
};
1548 
1549 // This closure is used during the precleaning phase
1550 // to "carefully" rescan marked objects on dirty cards.
1551 // It uses MarkRefsIntoAndScanClosure declared in genOopClosures.hpp
1552 // to accomplish some of its work.
1553 class ScanMarkedObjectsAgainCarefullyClosure: public ObjectClosureCareful {
1554   CMSCollector*                  _collector;
1555   MemRegion                      _span;
1556   bool                           _yield;
1557   Mutex*                         _freelistLock;
1558   CMSBitMap*                     _bitMap;
1559   CMSMarkStack*                  _markStack;
1560   MarkRefsIntoAndScanClosure*    _scanningClosure;
1561 
1562  public:
1563   ScanMarkedObjectsAgainCarefullyClosure(CMSCollector* collector,
1564                                          MemRegion     span,
1565                                          CMSBitMap* bitMap,
1566                                          CMSMarkStack*  markStack,
1567                                          MarkRefsIntoAndScanClosure* cl,
1568                                          bool should_yield):
1569     _collector(collector),
1570     _span(span),
    _yield(should_yield),
    _freelistLock(NULL),
    _bitMap(bitMap),
1573     _markStack(markStack),
1574     _scanningClosure(cl) {
1575   }
1576 
1577   void do_object(oop p) {
1578     guarantee(false, "call do_object_careful instead");
1579   }
1580 
1581   size_t      do_object_careful(oop p) {
1582     guarantee(false, "Unexpected caller");
1583     return 0;
1584   }
1585 
1586   size_t      do_object_careful_m(oop p, MemRegion mr);
1587 
1588   void setFreelistLock(Mutex* m) {
1589     _freelistLock = m;
1590     _scanningClosure->set_freelistLock(m);
1591   }
1592 
1593  private:
1594   inline bool do_yield_check();
1595 
1596   void do_yield_work();
1597 };
1598 
1599 class SurvivorSpacePrecleanClosure: public ObjectClosureCareful {
1600   CMSCollector*                  _collector;
1601   MemRegion                      _span;
1602   bool                           _yield;
1603   CMSBitMap*                     _bit_map;
1604   CMSMarkStack*                  _mark_stack;
1605   PushAndMarkClosure*            _scanning_closure;
1606   unsigned int                   _before_count;
1607 
1608  public:
1609   SurvivorSpacePrecleanClosure(CMSCollector* collector,
1610                                MemRegion     span,
1611                                CMSBitMap*    bit_map,
1612                                CMSMarkStack* mark_stack,
1613                                PushAndMarkClosure* cl,
1614                                unsigned int  before_count,
1615                                bool          should_yield):
1616     _collector(collector),
1617     _span(span),
1618     _yield(should_yield),
1619     _bit_map(bit_map),
1620     _mark_stack(mark_stack),
1621     _scanning_closure(cl),
1622     _before_count(before_count)
1623   { }
1624 
1625   void do_object(oop p) {
1626     guarantee(false, "call do_object_careful instead");
1627   }
1628 
1629   size_t      do_object_careful(oop p);
1630 
1631   size_t      do_object_careful_m(oop p, MemRegion mr) {
1632     guarantee(false, "Unexpected caller");
1633     return 0;
1634   }
1635 
1636  private:
1637   inline void do_yield_check();
1638   void do_yield_work();
1639 };
1640 
1641 // This closure is used to accomplish the sweeping work
1642 // after the second checkpoint but before the concurrent reset
1643 // phase.
1644 //
1645 // Terminology
1646 //   left hand chunk (LHC) - block of one or more chunks currently being
1647 //     coalesced.  The LHC is available for coalescing with a new chunk.
1648 //   right hand chunk (RHC) - block that is currently being swept that is
1649 //     free or garbage that can be coalesced with the LHC.
1650 // _inFreeRange is true if there is currently a LHC
1651 // _lastFreeRangeCoalesced is true if the LHC consists of more than one chunk.
1652 // _freeRangeInFreeLists is true if the LHC is in the free lists.
1653 // _freeFinger is the address of the current LHC
1654 class SweepClosure: public BlkClosureCareful {
1655   CMSCollector*                  _collector;  // collector doing the work
1656   ConcurrentMarkSweepGeneration* _g;    // Generation being swept
1657   CompactibleFreeListSpace*      _sp;   // Space being swept
  HeapWord*                      _limit;// the address at or above which the sweep should stop
                                        // because we do not expect blocks that become garbage to
                                        // be eligible for sweeping past that address.
1661   Mutex*                         _freelistLock; // Free list lock (in space)
1662   CMSBitMap*                     _bitMap;       // Marking bit map (in
1663                                                 // generation)
1664   bool                           _inFreeRange;  // Indicates if we are in the
1665                                                 // midst of a free run
1666   bool                           _freeRangeInFreeLists;
1667                                         // Often, we have just found
1668                                         // a free chunk and started
1669                                         // a new free range; we do not
1670                                         // eagerly remove this chunk from
1671                                         // the free lists unless there is
1672                                         // a possibility of coalescing.
                                        // When true, this flag indicates
                                        // that the _freeFinger below
                                        // points to a potentially free chunk
                                        // that may still be in the free lists.
1677   bool                           _lastFreeRangeCoalesced;
1678                                         // free range contains chunks
1679                                         // coalesced
1680   bool                           _yield;
1681                                         // Whether sweeping should be
1682                                         // done with yields. For instance
1683                                         // when done by the foreground
1684                                         // collector we shouldn't yield.
1685   HeapWord*                      _freeFinger;   // When _inFreeRange is set, the
1686                                                 // pointer to the "left hand
1687                                                 // chunk"
1688   size_t                         _freeRangeSize;
1689                                         // When _inFreeRange is set, this
1690                                         // indicates the accumulated size
1691                                         // of the "left hand chunk"
1692   NOT_PRODUCT(
1693     size_t                       _numObjectsFreed;
1694     size_t                       _numWordsFreed;
1695     size_t                       _numObjectsLive;
1696     size_t                       _numWordsLive;
1697     size_t                       _numObjectsAlreadyFree;
1698     size_t                       _numWordsAlreadyFree;
1699     FreeChunk*                   _last_fc;
1700   )
1701  private:
1702   // Code that is common to a free chunk or garbage when
1703   // encountered during sweeping.
1704   void do_post_free_or_garbage_chunk(FreeChunk *fc, size_t chunkSize);
1705   // Process a free chunk during sweeping.
1706   void do_already_free_chunk(FreeChunk *fc);
1707   // Work method called when processing an already free or a
1708   // freshly garbage chunk to do a lookahead and possibly a
1709   // preemptive flush if crossing over _limit.
1710   void lookahead_and_flush(FreeChunk* fc, size_t chunkSize);
1711   // Process a garbage chunk during sweeping.
1712   size_t do_garbage_chunk(FreeChunk *fc);
1713   // Process a live chunk during sweeping.
1714   size_t do_live_chunk(FreeChunk* fc);
1715 
1716   // Accessors.
1717   HeapWord* freeFinger() const          { return _freeFinger; }
1718   void set_freeFinger(HeapWord* v)      { _freeFinger = v; }
1719   bool inFreeRange()    const           { return _inFreeRange; }
1720   void set_inFreeRange(bool v)          { _inFreeRange = v; }
1721   bool lastFreeRangeCoalesced() const    { return _lastFreeRangeCoalesced; }
1722   void set_lastFreeRangeCoalesced(bool v) { _lastFreeRangeCoalesced = v; }
1723   bool freeRangeInFreeLists() const     { return _freeRangeInFreeLists; }
1724   void set_freeRangeInFreeLists(bool v) { _freeRangeInFreeLists = v; }
1725 
1726   // Initialize a free range.
1727   void initialize_free_range(HeapWord* freeFinger, bool freeRangeInFreeLists);
1728   // Return this chunk to the free lists.
1729   void flush_cur_free_chunk(HeapWord* chunk, size_t size);
1730 
1731   // Check if we should yield and do so when necessary.
1732   inline void do_yield_check(HeapWord* addr);
1733 
1734   // Yield
1735   void do_yield_work(HeapWord* addr);
1736 
1737   // Debugging/Printing
1738   void print_free_block_coalesced(FreeChunk* fc) const;
1739 
1740  public:
1741   SweepClosure(CMSCollector* collector, ConcurrentMarkSweepGeneration* g,
1742                CMSBitMap* bitMap, bool should_yield);
1743   ~SweepClosure() PRODUCT_RETURN;
1744 
1745   size_t       do_blk_careful(HeapWord* addr);
1746   void         print() const { print_on(tty); }
1747   void         print_on(outputStream *st) const;
1748 };
1749 
1750 // Closures related to weak references processing
1751 
1752 // During CMS' weak reference processing, this is a
1753 // work-routine/closure used to complete transitive
1754 // marking of objects as live after a certain point
// at which an initial set has been completely accumulated.
// This closure is currently used both during the final
// remark stop-world phase and during the concurrent
// precleaning of the discovered reference lists.
1759 class CMSDrainMarkingStackClosure: public VoidClosure {
1760   CMSCollector*        _collector;
1761   MemRegion            _span;
1762   CMSMarkStack*        _mark_stack;
1763   CMSBitMap*           _bit_map;
1764   CMSKeepAliveClosure* _keep_alive;
1765   bool                 _concurrent_precleaning;
1766  public:
1767   CMSDrainMarkingStackClosure(CMSCollector* collector, MemRegion span,
1768                       CMSBitMap* bit_map, CMSMarkStack* mark_stack,
1769                       CMSKeepAliveClosure* keep_alive,
1770                       bool cpc):
1771     _collector(collector),
1772     _span(span),
    _mark_stack(mark_stack),
    _bit_map(bit_map),
1775     _keep_alive(keep_alive),
1776     _concurrent_precleaning(cpc) {
1777     assert(_concurrent_precleaning == _keep_alive->concurrent_precleaning(),
1778            "Mismatch");
1779   }
1780 
1781   void do_void();
1782 };
1783 
1784 // A parallel version of CMSDrainMarkingStackClosure above.
1785 class CMSParDrainMarkingStackClosure: public VoidClosure {
1786   CMSCollector*           _collector;
1787   MemRegion               _span;
1788   OopTaskQueue*           _work_queue;
1789   CMSBitMap*              _bit_map;
1790   CMSInnerParMarkAndPushClosure _mark_and_push;
1791 
1792  public:
1793   CMSParDrainMarkingStackClosure(CMSCollector* collector,
1794                                  MemRegion span, CMSBitMap* bit_map,
1795                                  OopTaskQueue* work_queue):
1796     _collector(collector),
1797     _span(span),
    _work_queue(work_queue),
    _bit_map(bit_map),
1800     _mark_and_push(collector, span, bit_map, work_queue) { }
1801 
1803   void trim_queue(uint max);
1804   void do_void();
1805 };
1806 
1807 // Allow yielding or short-circuiting of reference list
1808 // precleaning work.
1809 class CMSPrecleanRefsYieldClosure: public YieldClosure {
1810   CMSCollector* _collector;
1811   void do_yield_work();
1812  public:
1813   CMSPrecleanRefsYieldClosure(CMSCollector* collector):
1814     _collector(collector) {}
1815   virtual bool should_return();
1816 };
1817 
1818 
1819 // Convenience class that locks free list locks for given CMS collector
1820 class FreelistLocker: public StackObj {
1821  private:
1822   CMSCollector* _collector;
1823  public:
1824   FreelistLocker(CMSCollector* collector):
1825     _collector(collector) {
1826     _collector->getFreelistLocks();
1827   }
1828 
1829   ~FreelistLocker() {
1830     _collector->releaseFreelistLocks();
1831   }
1832 };
1833 
1834 // Mark all dead objects in a given space.
1835 class MarkDeadObjectsClosure: public BlkClosure {
1836   const CMSCollector*             _collector;
1837   const CompactibleFreeListSpace* _sp;
1838   CMSBitMap*                      _live_bit_map;
1839   CMSBitMap*                      _dead_bit_map;
1840 public:
1841   MarkDeadObjectsClosure(const CMSCollector* collector,
1842                          const CompactibleFreeListSpace* sp,
1843                          CMSBitMap *live_bit_map,
1844                          CMSBitMap *dead_bit_map) :
1845     _collector(collector),
1846     _sp(sp),
1847     _live_bit_map(live_bit_map),
1848     _dead_bit_map(dead_bit_map) {}
1849   size_t do_blk(HeapWord* addr);
1850 };
1851 
1852 class TraceCMSMemoryManagerStats : public TraceMemoryManagerStats {
1853 
1854  public:
1855   TraceCMSMemoryManagerStats(CMSCollector::CollectorState phase, GCCause::Cause cause);
1856 };
1857 
1858 
1859 #endif // SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_HPP