/*
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_HPP

#include "gc_implementation/g1/heapRegionSet.hpp"
#include "gc_implementation/shared/gcId.hpp"
#include "utilities/taskqueue.hpp"

class G1CollectedHeap;
class CMTask;
typedef GenericTaskQueue<oop, mtGC>            CMTaskQueue;
typedef GenericTaskQueueSet<CMTaskQueue, mtGC> CMTaskQueueSet;

// Closure used by CM during concurrent reference discovery
// and reference processing (during remarking) to determine
// if a particular object is alive. It is primarily used
// to determine if referents of discovered reference objects
// are alive. An instance is also embedded into the
// reference processor as the _is_alive_non_header field.
class G1CMIsAliveClosure: public BoolObjectClosure {
  G1CollectedHeap* _g1;
 public:
  G1CMIsAliveClosure(G1CollectedHeap* g1) : _g1(g1) { }

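  // Returns whether the given object is considered live.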
  bool do_object_b(oop obj);
};

// A generic CM bit map.  This is essentially a wrapper around the BitMap
// class, with one bit per (1<<_shifter) HeapWords.

class CMBitMapRO VALUE_OBJ_CLASS_SPEC {
 protected:
  HeapWord* _bmStartWord;      // base address of range covered by map
  size_t    _bmWordSize;       // map size (in #HeapWords covered)
  const int _shifter;          // map to char or bit
  VirtualSpace _virtual_space; // underlying the bit map
  BitMap    _bm;               // the bit map itself

 public:
  // constructor
  CMBitMapRO(int shifter);

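  // Presumably consumed by generic iteration clients: advertises that
  // iteration over this bitmap may offer to yield.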
  enum { do_yield = true };

  // inquiries
  HeapWord* startWord()   const { return _bmStartWord; }
  size_t    sizeInWords() const { return _bmWordSize;  }
  // the following is one past the last word in space
  HeapWord* endWord()     const { return _bmStartWord + _bmWordSize; }

  // read marks

  bool isMarked(HeapWord* addr) const {
    assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
           "outside underlying space?");
    return _bm.at(heapWordToOffset(addr));
  }

  // iteration
  inline bool iterate(BitMapClosure* cl, MemRegion mr);
  inline bool iterate(BitMapClosure* cl);

  // Return the address corresponding to the next marked bit at or after
  // "addr", and before "limit", if "limit" is non-NULL.  If there is no
  // such bit, returns "limit" if that is non-NULL, or else "endWord()".
  HeapWord* getNextMarkedWordAddress(HeapWord* addr,
                                     HeapWord* limit = NULL) const;
  // Return the address corresponding to the next unmarked bit at or after
  // "addr", and before "limit", if "limit" is non-NULL.  If there is no
  // such bit, returns "limit" if that is non-NULL, or else "endWord()".
  HeapWord* getNextUnmarkedWordAddress(HeapWord* addr,
                                       HeapWord* limit = NULL) const;

  // conversion utilities
  HeapWord* offsetToHeapWord(size_t offset) const {
    return _bmStartWord + (offset << _shifter);
  }
  size_t heapWordToOffset(HeapWord* addr) const {
    return pointer_delta(addr, _bmStartWord) >> _shifter;
  }
  int heapWordDiffToOffsetDiff(size_t diff) const;

  // The argument addr should be the start address of a valid object
  HeapWord* nextObject(HeapWord* addr) {
    oop obj = (oop) addr;
    HeapWord* res = addr + obj->size();
    assert(offsetToHeapWord(heapWordToOffset(res)) == res, "sanity");
    return res;
  }

  void print_on_error(outputStream* st, const char* prefix) const;

  // debugging
  NOT_PRODUCT(bool covers(ReservedSpace rs) const;)
};

class CMBitMap : public CMBitMapRO {

 public:
  // constructor
  CMBitMap(int shifter) :
    CMBitMapRO(shifter) {}

  // Allocates the backing store for the marking bitmap
  bool allocate(ReservedSpace heap_rs);

  // write marks
  void mark(HeapWord* addr) {
    assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
           "outside underlying space?");
    _bm.set_bit(heapWordToOffset(addr));
  }
  void clear(HeapWord* addr) {
    assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
           "outside underlying space?");
    _bm.clear_bit(heapWordToOffset(addr));
  }
  bool parMark(HeapWord* addr) {
    assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
           "outside underlying space?");
    return _bm.par_set_bit(heapWordToOffset(addr));
  }
  bool parClear(HeapWord* addr) {
    assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
           "outside underlying space?");
    return _bm.par_clear_bit(heapWordToOffset(addr));
  }
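  // Range versions of mark()/clear(); clearAll() clears the whole map.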
  void markRange(MemRegion mr);
  void clearAll();
  void clearRange(MemRegion mr);

  // Starting at the bit corresponding to "addr" (inclusive), find the next
  // "1" bit, if any.  This bit starts some run of consecutive "1"'s; find
  // the end of this run (stopping at "end_addr").  Return the MemRegion
  // covering from the start of the region corresponding to the first bit
  // of the run to the end of the region corresponding to the last bit of
  // the run.  If there is no "1" bit at or after "addr", return an empty
  // MemRegion.
  MemRegion getAndClearMarkedRegion(HeapWord* addr, HeapWord* end_addr);
};

// Represents a marking stack used by ConcurrentMarking in the G1 collector.
class CMMarkStack VALUE_OBJ_CLASS_SPEC {
  VirtualSpace _virtual_space;   // Underlying backing store for actual stack
  ConcurrentMark* _cm;
  oop* _base;        // bottom of stack
  jint _index;       // one more than last occupied index
  jint _capacity;    // max #elements
  jint _saved_index; // value of _index saved at start of GC
  NOT_PRODUCT(jint _max_depth;)   // max depth plumbed during run

  bool  _overflow;
  bool  _should_expand;
  DEBUG_ONLY(bool _drain_in_progress;)
  DEBUG_ONLY(bool _drain_in_progress_yields;)

 public:
  CMMarkStack(ConcurrentMark* cm);
  ~CMMarkStack();

#ifndef PRODUCT
  jint max_depth() const {
    return _max_depth;
  }
#endif

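  // Allocates the backing store for a stack that can hold the given number
  // of oop entries; presumably returns false if the allocation fails.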
  bool allocate(size_t capacity);

  oop pop() {
    if (!isEmpty()) {
      return _base[--_index];
    }
    return NULL;
  }

  // If overflow happens, don't do the push, and record the overflow.
  // *Requires* that "ptr" is already marked.
  void push(oop ptr) {
    if (isFull()) {
      // Record overflow.
      _overflow = true;
      return;
    } else {
      _base[_index++] = ptr;
      NOT_PRODUCT(_max_depth = MAX2(_max_depth, _index));
    }
  }
  // Non-block impl.  Note: concurrency is allowed only with other
  // "par_push" operations, not with "pop" or "drain".  We would need
  // parallel versions of them if such concurrency was desired.
  void par_push(oop ptr);

  // Pushes the first "n" elements of "ptr_arr" on the stack.
  // Non-block impl.  Note: concurrency is allowed only with other
  // "par_adjoin_arr" or "push" operations, not with "pop" or "drain".
  void par_adjoin_arr(oop* ptr_arr, int n);

  // Pushes the first "n" elements of "ptr_arr" on the stack.
  // Locking impl: concurrency is allowed only with
  // "par_push_arr" and/or "par_pop_arr" operations, which use the same
  // locking strategy.
  void par_push_arr(oop* ptr_arr, int n);

  // If returns false, the array was empty.  Otherwise, removes up to "max"
  // elements from the stack, and transfers them to "ptr_arr" in an
  // unspecified order.  The actual number transferred is given in "n" ("n
  // == 0" is deliberately redundant with the return value.)  Locking impl:
  // concurrency is allowed only with "par_push_arr" and/or "par_pop_arr"
  // operations, which use the same locking strategy.
  bool par_pop_arr(oop* ptr_arr, int max, int* n);

  // Drain the mark stack, applying the given closure to all fields of
  // objects on the stack.  (That is, continue until the stack is empty,
  // even if closure applications add entries to the stack.)  The "bm"
  // argument, if non-null, may be used to verify that only marked objects
  // are on the mark stack.  If "yield_after" is "true", then the
  // concurrent marker performing the drain offers to yield after
  // processing each object.  If a yield occurs, stops the drain operation
  // and returns false.  Otherwise, returns true.
  template<class OopClosureClass>
  bool drain(OopClosureClass* cl, CMBitMap* bm, bool yield_after = false);

  bool isEmpty()    { return _index == 0; }
  bool isFull()     { return _index == _capacity; }
  int  maxElems()   { return _capacity; }

  bool overflow() { return _overflow; }
  void clear_overflow() { _overflow = false; }

  bool should_expand() const { return _should_expand; }
  void set_should_expand();

  // Expand the stack, typically in response to an overflow condition
  void expand();

  int  size() { return _index; }

  void setEmpty()   { _index = 0; clear_overflow(); }

  // Record the current index.
  void note_start_of_gc();

  // Make sure that we have not added any entries to the stack during GC.
  void note_end_of_gc();

  // iterate over the oops in the mark stack, up to the bound recorded via
  // the call above.
  void oops_do(OopClosure* f);
};

class ForceOverflowSettings VALUE_OBJ_CLASS_SPEC {
private:
#ifndef PRODUCT
  uintx _num_remaining;
  bool _force;
#endif // !defined(PRODUCT)

public:
  void init() PRODUCT_RETURN;
  void update() PRODUCT_RETURN;
  bool should_force() PRODUCT_RETURN_( return false; );
};

// this will enable a variety of different statistics per GC task
#define _MARKING_STATS_       0
// this will enable the higher verbose levels
#define _MARKING_VERBOSE_     0

#if _MARKING_STATS_
#define statsOnly(statement)  \
do {                          \
  statement ;                 \
} while (0)
#else // _MARKING_STATS_
#define statsOnly(statement)  \
do {                          \
} while (0)
#endif // _MARKING_STATS_
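// Example use (a sketch): statsOnly( _local_pushes += 1 ); expands to the
// given statement when _MARKING_STATS_ is 1 and to an empty body otherwise.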

typedef enum {
  no_verbose  = 0,   // verbose turned off
  stats_verbose,     // only prints stats at the end of marking
  low_verbose,       // low verbose, mostly per region and per major event
  medium_verbose,    // a bit more detailed than low
  high_verbose       // per object verbose
} CMVerboseLevel;

class YoungList;

// Root Regions are regions that are not empty at the beginning of a
// marking cycle and which we might collect during an evacuation pause
// while the cycle is active. Given that, during evacuation pauses, we
// do not copy objects that are explicitly marked, what we have to do
// for the root regions is to scan them and mark all objects reachable
// from them. According to the SATB assumptions, we only need to visit
// each object once during marking. So, as long as we finish this scan
// before the next evacuation pause, we can copy the objects from the
// root regions without having to mark them or do anything else to them.
//
// Currently, we only support root region scanning once (at the start
// of the marking cycle) and the root regions are all the survivor
// regions populated during the initial-mark pause.
class CMRootRegions VALUE_OBJ_CLASS_SPEC {
private:
  YoungList*           _young_list;
  ConcurrentMark*      _cm;

  volatile bool        _scan_in_progress;
  volatile bool        _should_abort;
  HeapRegion* volatile _next_survivor;

public:
  CMRootRegions();
  // We actually do most of the initialization in this method.
  void init(G1CollectedHeap* g1h, ConcurrentMark* cm);

  // Reset the claiming / scanning of the root regions.
  void prepare_for_scan();

  // Forces claim_next() to return NULL so that the iteration aborts early.
  void abort() { _should_abort = true; }

  // Return true if the CM threads are actively scanning root regions,
  // false otherwise.
  bool scan_in_progress() { return _scan_in_progress; }

  // Claim the next root region to scan atomically, or return NULL if
  // all have been claimed.
  HeapRegion* claim_next();

  // Flag that we're done with root region scanning and notify anyone
  // who's waiting on it. If aborted is false, assume that all regions
  // have been claimed.
  void scan_finished();

  // If CM threads are still scanning root regions, wait until they
  // are done. Return true if we had to wait, false otherwise.
  bool wait_until_scan_finished();
};
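
// Typical lifecycle, as a sketch inferred from the interface above:
// prepare_for_scan() is called before marking starts, workers call
// claim_next() until it returns NULL, scan_finished() flags completion,
// and an evacuation pause that begins mid-scan first calls
// wait_until_scan_finished().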

class ConcurrentMarkThread;

class ConcurrentMark: public CHeapObj<mtGC> {
  friend class CMMarkStack;
  friend class ConcurrentMarkThread;
  friend class CMTask;
  friend class CMBitMapClosure;
  friend class CMGlobalObjectClosure;
  friend class CMRemarkTask;
  friend class CMConcurrentMarkingTask;
  friend class G1ParNoteEndTask;
  friend class CalcLiveObjectsClosure;
  friend class G1CMRefProcTaskProxy;
  friend class G1CMRefProcTaskExecutor;
  friend class G1CMKeepAliveAndDrainClosure;
  friend class G1CMDrainMarkingStackClosure;

protected:
  ConcurrentMarkThread* _cmThread;   // The thread doing the work
  G1CollectedHeap*      _g1h;        // The heap
  uint                  _parallel_marking_threads; // The number of marking
                                                   // threads we're using
  uint                  _max_parallel_marking_threads; // Max number of marking
                                                       // threads we'll ever use
  double                _sleep_factor; // How much we have to sleep, with
                                       // respect to the work we just did, to
                                       // meet the marking overhead goal
  double                _marking_task_overhead; // Marking target overhead for
                                                // a single task

  // Same as the two above, but for the cleanup task
  double                _cleanup_sleep_factor;
  double                _cleanup_task_overhead;

  FreeRegionList        _cleanup_list;

  // Concurrent marking support structures
  CMBitMap                _markBitMap1;
  CMBitMap                _markBitMap2;
  CMBitMapRO*             _prevMarkBitMap; // Completed mark bitmap
  CMBitMap*               _nextMarkBitMap; // Under-construction mark bitmap

  BitMap                  _region_bm;
  BitMap                  _card_bm;

  // Heap bounds
  HeapWord*               _heap_start;
  HeapWord*               _heap_end;

  // Root region tracking and claiming
  CMRootRegions           _root_regions;

  // For gray objects
  CMMarkStack             _markStack; // Grey objects behind global finger
  HeapWord* volatile      _finger;  // The global finger, region aligned,
                                    // always points to the end of the
                                    // last claimed region

  // Marking tasks
  uint                    _max_worker_id; // Maximum worker id
  uint                    _active_tasks; // Task num currently active
  CMTask**                _tasks;        // Task queue array (max_worker_id len)
  CMTaskQueueSet*         _task_queues;  // Task queue set
  ParallelTaskTerminator  _terminator;   // For termination

  // Two sync barriers that are used to synchronize tasks when an
  // overflow occurs. The algorithm is the following. All tasks enter
  // the first one to ensure that they have all stopped manipulating
  // the global data structures. After they exit it, they re-initialize
  // their data structures and task 0 re-initializes the global data
  // structures. Then, they enter the second sync barrier. This
  // ensures that no task starts doing work before all data
  // structures (local and global) have been re-initialized. When they
  // exit it, they are free to start working again.
  WorkGangBarrierSync     _first_overflow_barrier_sync;
  WorkGangBarrierSync     _second_overflow_barrier_sync;
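  // In sketch form, a task that observes an overflow does:
  //   enter_first_sync_barrier(worker_id);   // all tasks quiesce
  //   ... re-initialize local state (task 0 also resets global state) ...
  //   enter_second_sync_barrier(worker_id);  // then resume marking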

  // This is set by any task, when an overflow on the global data
  // structures is detected
  volatile bool           _has_overflown;
  // True: marking is concurrent, false: we're in remark
  volatile bool           _concurrent;
  // Set at the end of a Full GC so that marking aborts
  volatile bool           _has_aborted;
  GCId                    _aborted_gc_id;

  // Used when remark aborts due to an overflow to indicate that
  // another concurrent marking phase should start
  volatile bool           _restart_for_overflow;

  // This is true from the very start of concurrent marking until the
  // point when all the tasks complete their work. It is really used
  // to identify the window between the end of concurrent marking and
  // the remark pause.
  volatile bool           _concurrent_marking_in_progress;

  // Verbose level
  CMVerboseLevel          _verbose_level;

  // All of these times are in ms
  NumberSeq _init_times;
  NumberSeq _remark_times;
  NumberSeq   _remark_mark_times;
  NumberSeq   _remark_weak_ref_times;
  NumberSeq _cleanup_times;
  double    _total_counting_time;
  double    _total_rs_scrub_time;

  double*   _accum_task_vtime;   // Accumulated task vtime

  FlexibleWorkGang* _parallel_workers;

  ForceOverflowSettings _force_overflow_conc;
  ForceOverflowSettings _force_overflow_stw;

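  // Does the reference processing work that happens as part of remark;
  // clear_all_soft_refs presumably forces all soft references to be cleared.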
  void weakRefsWork(bool clear_all_soft_refs);

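  // Swaps the roles of _prevMarkBitMap and _nextMarkBitMap (done once a
  // marking cycle has completed).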
  void swapMarkBitMaps();

  // It resets the global marking data structures, as well as the
  // task local ones; should be called during initial mark.
  void reset();

  // Resets all the marking data structures. Called when we have to restart
  // marking or when marking completes (via set_non_marking_state below).
  void reset_marking_state(bool clear_overflow = true);

  // We do this after we're done with marking so that the marking data
  // structures are initialized to a sensible and predictable state.
  void set_non_marking_state();

  // Called to indicate how many threads are currently active.
  void set_concurrency(uint active_tasks);

  // It should be called to indicate which phase we're in (concurrent
  // mark or remark) and how many threads are currently active.
  void set_concurrency_and_phase(uint active_tasks, bool concurrent);

  // Prints all gathered CM-related statistics
  void print_stats();

  bool cleanup_list_is_empty() {
    return _cleanup_list.is_empty();
  }

  // Accessor methods
  uint parallel_marking_threads() const     { return _parallel_marking_threads; }
  uint max_parallel_marking_threads() const { return _max_parallel_marking_threads; }
  double sleep_factor()                     { return _sleep_factor; }
  double marking_task_overhead()            { return _marking_task_overhead; }
  double cleanup_sleep_factor()             { return _cleanup_sleep_factor; }
  double cleanup_task_overhead()            { return _cleanup_task_overhead; }

  bool use_parallel_marking_threads() const {
    assert(parallel_marking_threads() <=
           max_parallel_marking_threads(), "sanity");
    assert((_parallel_workers == NULL && parallel_marking_threads() == 0) ||
           parallel_marking_threads() > 0,
           "parallel workers not set up correctly");
    return _parallel_workers != NULL;
  }

  HeapWord*               finger()          { return _finger;   }
  bool                    concurrent()      { return _concurrent; }
  uint                    active_tasks()    { return _active_tasks; }
  ParallelTaskTerminator* terminator()      { return &_terminator; }

  // It claims the next available region to be scanned by a marking
  // task/thread. It might return NULL if the next region is empty or
  // we have run out of regions. In the latter case, out_of_regions()
  // determines whether we've really run out of regions or the task
  // should call claim_region() again. This might seem a bit
  // awkward. Originally, the code was written so that claim_region()
  // either successfully returned with a non-empty region or there
  // were no more regions to be claimed. The problem with this was
  // that, in certain circumstances, it iterated over large chunks of
  // the heap finding only empty regions and, while it was working, it
  // was preventing the calling task from calling its regular clock
  // method. So, this way, each task will spend very little time in
  // claim_region() and is allowed to call the regular clock method
  // frequently.
  HeapRegion* claim_region(uint worker_id);
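  // A scanning loop therefore looks roughly like this (a sketch, not the
  // actual code):
  //   while (!out_of_regions()) {
  //     HeapRegion* hr = claim_region(worker_id);
  //     if (hr != NULL) { /* scan hr */ }
  //     /* call the regular clock method between attempts */
  //   }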

  // It determines whether we've run out of regions to scan. Note that
  // the finger can point past the heap end in case the heap was expanded
  // to satisfy an allocation without doing a GC. This is fine, because all
  // objects in those regions will be considered live anyway because of
  // SATB guarantees (i.e. their TAMS will be equal to bottom).
  bool        out_of_regions() { return _finger >= _heap_end; }

  // Returns the task with the given id
  CMTask* task(int id) {
    assert(0 <= id && id < (int) _active_tasks,
           "task id not within active bounds");
    return _tasks[id];
  }

  // Returns the task queue with the given id
  CMTaskQueue* task_queue(int id) {
    assert(0 <= id && id < (int) _active_tasks,
           "task queue id not within active bounds");
    return (CMTaskQueue*) _task_queues->queue(id);
  }

  // Returns the task queue set
  CMTaskQueueSet* task_queues()  { return _task_queues; }

  // Access / manipulation of the overflow flag which is set to
  // indicate that the global stack has overflown
  bool has_overflown()           { return _has_overflown; }
  void set_has_overflown()       { _has_overflown = true; }
  void clear_has_overflown()     { _has_overflown = false; }
  bool restart_for_overflow()    { return _restart_for_overflow; }

  // Methods to enter the two overflow sync barriers
  void enter_first_sync_barrier(uint worker_id);
  void enter_second_sync_barrier(uint worker_id);

  ForceOverflowSettings* force_overflow_conc() {
    return &_force_overflow_conc;
  }

  ForceOverflowSettings* force_overflow_stw() {
    return &_force_overflow_stw;
  }

  ForceOverflowSettings* force_overflow() {
    if (concurrent()) {
      return force_overflow_conc();
    } else {
      return force_overflow_stw();
    }
  }

  // Live Data Counting data structures...
  // These data structures are initialized at the start of
  // marking. They are written to while marking is active.
  // They are aggregated during remark; the aggregated values
  // are then used to populate the _region_bm, _card_bm, and
  // the total live bytes, which are then subsequently updated
  // during cleanup.

  // An array of bitmaps (one bit map per task). Each bitmap
  // is used to record the cards spanned by the live objects
  // marked by that task/worker.
  BitMap*  _count_card_bitmaps;

  // Used to record the number of marked live bytes
  // (for each region, by worker thread).
  size_t** _count_marked_bytes;

  // Card index of the bottom of the G1 heap. Used for biasing indices into
  // the card bitmaps.
  intptr_t _heap_bottom_card_num;

  // Set to true when initialization is complete
  bool _completed_initialization;

public:
  // Manipulation of the global mark stack.
  // Notice that the first mark_stack_push is CAS-based, whereas the
  // two below are Mutex-based. This is OK since the first one is only
  // called during evacuation pauses and doesn't compete with the
  // other two (which are called by the marking tasks during
  // concurrent marking or remark).
  bool mark_stack_push(oop p) {
    _markStack.par_push(p);
    if (_markStack.overflow()) {
      set_has_overflown();
      return false;
    }
    return true;
  }
  bool mark_stack_push(oop* arr, int n) {
    _markStack.par_push_arr(arr, n);
    if (_markStack.overflow()) {
      set_has_overflown();
      return false;
    }
    return true;
  }
  void mark_stack_pop(oop* arr, int max, int* n) {
    _markStack.par_pop_arr(arr, max, n);
  }
  size_t mark_stack_size()                { return _markStack.size(); }
  size_t partial_mark_stack_size_target() { return _markStack.maxElems()/3; }
  bool mark_stack_overflow()              { return _markStack.overflow(); }
  bool mark_stack_empty()                 { return _markStack.isEmpty(); }

  CMRootRegions* root_regions() { return &_root_regions; }

  bool concurrent_marking_in_progress() {
    return _concurrent_marking_in_progress;
  }
  void set_concurrent_marking_in_progress() {
    _concurrent_marking_in_progress = true;
  }
  void clear_concurrent_marking_in_progress() {
    _concurrent_marking_in_progress = false;
  }

  void update_accum_task_vtime(int i, double vtime) {
    _accum_task_vtime[i] += vtime;
  }

  double all_task_accum_vtime() {
    double ret = 0.0;
    for (uint i = 0; i < _max_worker_id; ++i) {
      ret += _accum_task_vtime[i];
    }
    return ret;
  }

  // Attempts to steal an object from the task queues of other tasks
  bool try_stealing(uint worker_id, int* hash_seed, oop& obj) {
    return _task_queues->steal(worker_id, hash_seed, obj);
  }

  ConcurrentMark(G1CollectedHeap* g1h, ReservedSpace heap_rs);
  ~ConcurrentMark();

  ConcurrentMarkThread* cmThread() { return _cmThread; }

  CMBitMapRO* prevMarkBitMap() const { return _prevMarkBitMap; }
  CMBitMap*   nextMarkBitMap() const { return _nextMarkBitMap; }

  // Returns the number of GC threads to be used in a concurrent
  // phase based on the number of GC threads being used in a STW
  // phase.
  uint scale_parallel_threads(uint n_par_threads);

  // Calculates the number of GC threads to be used in a concurrent phase.
  uint calc_parallel_marking_threads();

  // The following three methods deal with the interaction between CM
  // and G1CollectedHeap

  // This notifies CM that a root during initial-mark needs to be
  // grayed. It is MT-safe. word_size is the size of the object in
  // words. It is passed explicitly as sometimes we cannot calculate
  // it from the given object because it might be in an inconsistent
  // state (e.g., in to-space and being copied). So the caller is
  // responsible for dealing with this issue (e.g., get the size from
  // the from-space image when the to-space image might be
  // inconsistent) and always passing the size. hr is the region that
  // contains the object and it's passed optionally from callers who
  // might already have it (no point in recalculating it).
  inline void grayRoot(oop obj, size_t word_size,
                       uint worker_id, HeapRegion* hr = NULL);

  // It iterates over the heap and for each object it comes across it
  // will dump the contents of its reference fields, as well as
  // liveness information for the object and its referents. The dump
  // will be written to a file with the following name:
  // G1PrintReachableBaseFile + "." + str.
  // vo decides whether the prev (vo == UsePrevMarking), the next
  // (vo == UseNextMarking) marking information, or the mark word
  // (vo == UseMarkWord) will be used to determine the liveness of
  // each object / referent.
  // If all is true, all objects in the heap will be dumped, otherwise
  // only the live ones. In the dump the following symbols / abbreviations
  // are used:
  //   M : an explicitly live object (its bitmap bit is set)
  //   > : an implicitly live object (over TAMS)
  //   O : an object outside the G1 heap (typically: in the perm gen)
  //   NOT : a reference field whose referent is not live
  //   AND MARKED : indicates that an object is both explicitly and
  //   implicitly live (it should be one or the other, not both)
  void print_reachable(const char* str,
                       VerifyOption vo, bool all) PRODUCT_RETURN;

  // Clear the next marking bitmap (will be called concurrently).
  void clearNextBitmap();

  // These two do the work that needs to be done before and after the
  // initial root checkpoint. Since this checkpoint can be done at two
  // different points (i.e. an explicit pause or piggy-backed on a
  // young collection), it's nice to be able to easily share the
  // pre/post code. It might be the case that we can put everything in
  // the post method. TP
  void checkpointRootsInitialPre();
  void checkpointRootsInitialPost();

  // Scan all the root regions and mark everything reachable from
  // them.
  void scanRootRegions();

  // Scan a single root region and mark everything reachable from it.
  void scanRootRegion(HeapRegion* hr, uint worker_id);

  // Do concurrent phase of marking, to a tentative transitive closure.
  void markFromRoots();

  void checkpointRootsFinal(bool clear_all_soft_refs);
  void checkpointRootsFinalWork();
  void cleanup();
  void completeCleanup();

  // Mark in the previous bitmap.  NB: this is usually read-only, so use
  // this carefully!
  inline void markPrev(oop p);

  // Clears marks for all objects in the given range, for the prev,
  // next, or both bitmaps.  NB: the previous bitmap is usually
  // read-only, so use this carefully!
  void clearRangePrevBitmap(MemRegion mr);
  void clearRangeNextBitmap(MemRegion mr);
  void clearRangeBothBitmaps(MemRegion mr);

  // Notify data structures that a GC has started.
  void note_start_of_gc() {
    _markStack.note_start_of_gc();
  }

  // Notify data structures that a GC is finished.
  void note_end_of_gc() {
    _markStack.note_end_of_gc();
  }

  // Verify that there are no CSet oops on the stacks (taskqueues /
  // global mark stack), enqueued SATB buffers, per-thread SATB
  // buffers, and fingers (global / per-task). The boolean parameters
  // decide which of the above data structures to verify. If marking
  // is not in progress, it's a no-op.
  void verify_no_cset_oops(bool verify_stacks,
                           bool verify_enqueued_buffers,
                           bool verify_thread_buffers,
                           bool verify_fingers) PRODUCT_RETURN;

  // It is called at the end of an evacuation pause during marking so
  // that CM is notified of where the new end of the heap is. It
  // doesn't do anything if concurrent_marking_in_progress() is false,
  // unless the force parameter is true.
  void update_g1_committed(bool force = false);

  bool isMarked(oop p) const {
    assert(p != NULL && p->is_oop(), "expected an oop");
    HeapWord* addr = (HeapWord*)p;
    assert(addr >= _nextMarkBitMap->startWord() &&
           addr < _nextMarkBitMap->endWord(), "in a region");

    return _nextMarkBitMap->isMarked(addr);
  }

  inline bool not_yet_marked(oop p) const;

  // XXX Debug code
  bool containing_card_is_marked(void* p);
  bool containing_cards_are_marked(void* start, void* last);

  bool isPrevMarked(oop p) const {
    assert(p != NULL && p->is_oop(), "expected an oop");
    HeapWord* addr = (HeapWord*)p;
    assert(addr >= _prevMarkBitMap->startWord() &&
           addr < _prevMarkBitMap->endWord(), "in a region");

    return _prevMarkBitMap->isMarked(addr);
  }

  inline bool do_yield_check(uint worker_i = 0);

  // Called to abort the marking cycle after a Full GC takes place.
  void abort();

  bool has_aborted()      { return _has_aborted; }

  const GCId& concurrent_gc_id();

  // This prints the global/local fingers. It is used for debugging.
  NOT_PRODUCT(void print_finger();)

  void print_summary_info();

  void print_worker_threads_on(outputStream* st) const;

  void print_on_error(outputStream* st) const;

  // The following indicate whether a given verbose level has been
  // set. Notice that anything above stats is conditional on
  // _MARKING_VERBOSE_ having been set to 1
  bool verbose_stats() {
    return _verbose_level >= stats_verbose;
  }
  bool verbose_low() {
    return _MARKING_VERBOSE_ && _verbose_level >= low_verbose;
  }
  bool verbose_medium() {
    return _MARKING_VERBOSE_ && _verbose_level >= medium_verbose;
  }
  bool verbose_high() {
    return _MARKING_VERBOSE_ && _verbose_level >= high_verbose;
  }

  // Liveness counting

  // Utility routine to set an exclusive range of cards on the given
  // card liveness bitmap
  inline void set_card_bitmap_range(BitMap* card_bm,
                                    BitMap::idx_t start_idx,
                                    BitMap::idx_t end_idx,
                                    bool is_par);

  // Returns the card number of the bottom of the G1 heap.
  // Used in biasing indices into accounting card bitmaps.
  intptr_t heap_bottom_card_num() const {
    return _heap_bottom_card_num;
  }

  // Returns the card bitmap for a given task or worker id.
  BitMap* count_card_bitmap_for(uint worker_id) {
    assert(0 <= worker_id && worker_id < _max_worker_id, "oob");
    assert(_count_card_bitmaps != NULL, "uninitialized");
    BitMap* task_card_bm = &_count_card_bitmaps[worker_id];
    assert(task_card_bm->size() == _card_bm.size(), "size mismatch");
    return task_card_bm;
  }

  // Returns the array containing the marked bytes for each region,
  // for the given worker or task id.
  size_t* count_marked_bytes_array_for(uint worker_id) {
    assert(0 <= worker_id && worker_id < _max_worker_id, "oob");
    assert(_count_marked_bytes != NULL, "uninitialized");
    size_t* marked_bytes_array = _count_marked_bytes[worker_id];
    assert(marked_bytes_array != NULL, "uninitialized");
    return marked_bytes_array;
  }

  // Returns the index in the liveness accounting card table bitmap
  // for the given address
  inline BitMap::idx_t card_bitmap_index_for(HeapWord* addr);

  // Counts the size of the given memory region in the given
  // marked_bytes array slot for the given HeapRegion.
  // Sets the bits in the given card bitmap that are associated with the
  // cards that are spanned by the memory region.
  inline void count_region(MemRegion mr, HeapRegion* hr,
                           size_t* marked_bytes_array,
                           BitMap* task_card_bm);

  // Counts the given memory region in the task/worker counting
  // data structures for the given worker id.
  inline void count_region(MemRegion mr, HeapRegion* hr, uint worker_id);

  // Counts the given memory region in the task/worker counting
  // data structures for the given worker id.
  inline void count_region(MemRegion mr, uint worker_id);

  // Counts the given object in the given task/worker counting
  // data structures.
  inline void count_object(oop obj, HeapRegion* hr,
                           size_t* marked_bytes_array,
                           BitMap* task_card_bm);

  // Counts the given object in the task/worker counting data
  // structures for the given worker id.
  inline void count_object(oop obj, HeapRegion* hr, uint worker_id);

  // Attempts to mark the given object and, if successful, counts
  // the object in the given task/worker counting structures.
  inline bool par_mark_and_count(oop obj, HeapRegion* hr,
                                 size_t* marked_bytes_array,
                                 BitMap* task_card_bm);

  // Attempts to mark the given object and, if successful, counts
  // the object in the task/worker counting structures for the
  // given worker id.
  inline bool par_mark_and_count(oop obj, size_t word_size,
                                 HeapRegion* hr, uint worker_id);

  // Attempts to mark the given object and, if successful, counts
  // the object in the task/worker counting structures for the
  // given worker id.
  inline bool par_mark_and_count(oop obj, HeapRegion* hr, uint worker_id);

  // Similar to the above routine but we don't know the heap region that
  // contains the object to be marked/counted, which this routine looks up.
  inline bool par_mark_and_count(oop obj, uint worker_id);

  // Similar to the above routine but there are times when we cannot
  // safely calculate the size of obj due to races and we, therefore,
  // pass the size in as a parameter. It is the caller's responsibility
  // to ensure that the size passed in for obj is valid.
  inline bool par_mark_and_count(oop obj, size_t word_size, uint worker_id);

  // Unconditionally mark the given object, and unconditionally count
  // the object in the counting structures for worker id 0.
  // Should *not* be called from parallel code.
  inline bool mark_and_count(oop obj, HeapRegion* hr);

  // Similar to the above routine but we don't know the heap region that
  // contains the object to be marked/counted, which this routine looks up.
  // Should *not* be called from parallel code.
  inline bool mark_and_count(oop obj);

  // Returns true if initialization was successfully completed.
  bool completed_initialization() const {
    return _completed_initialization;
  }

protected:
  // Clear all the per-task bitmaps and arrays used to store the
  // counting data.
  void clear_all_count_data();

  // Aggregates the counting data for each worker/task
  // that was constructed while marking. Also sets
  // the amount of marked bytes for each region and
  // the top at concurrent mark count.
  void aggregate_count_data();

  // Verification routine
  void verify_count_data();
};

// A class representing a marking task.
class CMTask : public TerminatorTerminator {
private:
  enum PrivateConstants {
    // the regular clock call is called once the number of scanned
    // words reaches this limit
    words_scanned_period          = 12*1024,
    // the regular clock call is called once the number of visited
    // references reaches this limit
    refs_reached_period           = 384,
    // initial value for the hash seed, used in the work stealing code
    init_hash_seed                = 17,
    // how many entries will be transferred between global stack and
    // local queues
    global_stack_transfer_size    = 16
  };

  uint                        _worker_id;
  G1CollectedHeap*            _g1h;
  ConcurrentMark*             _cm;
  CMBitMap*                   _nextMarkBitMap;
  // the task queue of this task
  CMTaskQueue*                _task_queue;
  // the task queue set---needed for stealing
  CMTaskQueueSet*             _task_queues;
  // indicates whether the task has been claimed---this is only for
  // debugging purposes
  bool                        _claimed;

  // number of calls to this task
  int                         _calls;

  // when the virtual timer reaches this time, the marking step should
  // exit
  double                      _time_target_ms;
  // the start time of the current marking step
  double                      _start_time_ms;

  // the oop closure used for iterations over oops
  G1CMOopClosure*             _cm_oop_closure;

  // the region this task is scanning, NULL if we're not scanning any
  HeapRegion*                 _curr_region;
  // the local finger of this task, NULL if we're not scanning a region
  HeapWord*                   _finger;
  // limit of the region this task is scanning, NULL if we're not scanning one
  HeapWord*                   _region_limit;

  // the number of words this task has scanned
  size_t                      _words_scanned;
  // When _words_scanned reaches this limit, the regular clock is
  // called. Notice that this might be decreased under certain
  // circumstances (i.e. when we believe that we did an expensive
  // operation).
  size_t                      _words_scanned_limit;
  // the initial value of _words_scanned_limit (i.e. what it was
  // before it was decreased).
  size_t                      _real_words_scanned_limit;

  // the number of references this task has visited
  size_t                      _refs_reached;
  // When _refs_reached reaches this limit, the regular clock is
  // called. Notice that this might be decreased under certain
  // circumstances (i.e. when we believe that we did an expensive
  // operation).
  size_t                      _refs_reached_limit;
  // the initial value of _refs_reached_limit (i.e. what it was before
  // it was decreased).
  size_t                      _real_refs_reached_limit;

  // used by the work stealing stuff
  int                         _hash_seed;
  // if this is true, then the task has aborted for some reason
  bool                        _has_aborted;
  // set when the task aborts because it has met its time quota
  bool                        _has_timed_out;
  // true when we're draining SATB buffers; this avoids the task
  // aborting due to SATB buffers being available (as we're already
  // dealing with them)
  bool                        _draining_satb_buffers;

  // number sequence of past step times
  NumberSeq                   _step_times_ms;
  // elapsed time of this task
  double                      _elapsed_time_ms;
  // termination time of this task
  double                      _termination_time_ms;
  // when this task got into the termination protocol
  double                      _termination_start_time_ms;

  // true when the task is in a concurrent phase, false when it is
  // in the remark phase (so, in the latter case, we do not have to
  // check all the things that we have to check during the concurrent
  // phase, i.e. SATB buffer availability...)
  bool                        _concurrent;

  TruncatedSeq                _marking_step_diffs_ms;

  // Counting data structures. Embedding the task's marked_bytes_array
  // and card bitmap into the actual task saves having to go through
  // the ConcurrentMark object.
  size_t*                     _marked_bytes_array;
  BitMap*                     _card_bm;

  // LOTS of statistics related to this task
#if _MARKING_STATS_
  NumberSeq                   _all_clock_intervals_ms;
  double                      _interval_start_time_ms;

  int                         _aborted;
  int                         _aborted_overflow;
  int                         _aborted_cm_aborted;
  int                         _aborted_yield;
  int                         _aborted_timed_out;
  int                         _aborted_satb;
  int                         _aborted_termination;

  int                         _steal_attempts;
  int                         _steals;

  int                         _clock_due_to_marking;
  int                         _clock_due_to_scanning;

  int                         _local_pushes;
  int                         _local_pops;
  int                         _local_max_size;
  int                         _objs_scanned;

  int                         _global_pushes;
  int                         _global_pops;
  int                         _global_max_size;

  int                         _global_transfers_to;
  int                         _global_transfers_from;

  int                         _regions_claimed;
  int                         _objs_found_on_bitmap;

  int                         _satb_buffers_processed;
#endif // _MARKING_STATS_

  // it updates the local fields after this task has claimed
  // a new region to scan
  void setup_for_region(HeapRegion* hr);
  // it brings up-to-date the limit of the region
  void update_region_limit();

  // called when either the words scanned or the refs visited limit
  // has been reached
  void reached_limit();
  // recalculates the words scanned and refs visited limits
  void recalculate_limits();
  // decreases the words scanned and refs visited limits when we reach
  // an expensive operation
  void decrease_limits();
  // it checks whether the words scanned or refs visited reached their
  // respective limit and calls reached_limit() if they have
  void check_limits() {
    if (_words_scanned >= _words_scanned_limit ||
        _refs_reached >= _refs_reached_limit) {
      reached_limit();
    }
  }
  // this is supposed to be called regularly during a marking step as
  // it checks a bunch of conditions that might cause the marking step
  // to abort
  void regular_clock_call();
  bool concurrent() { return _concurrent; }

public:
  // It resets the task; it should be called right at the beginning of
  // a marking phase.
  void reset(CMBitMap* _nextMarkBitMap);
  // it clears all the fields that correspond to a claimed region.
  void clear_region_fields();

  void set_concurrent(bool concurrent) { _concurrent = concurrent; }

  // The main method of this class which performs a marking step
  // trying not to exceed the given duration. However, it might exit
  // prematurely, according to some conditions (i.e. SATB buffers are
  // available for processing).
  void do_marking_step(double target_ms,
                       bool do_termination,
                       bool is_serial);

  // These two calls start and stop the timer
  void record_start_time() {
    _elapsed_time_ms = os::elapsedTime() * 1000.0;
  }
  void record_end_time() {
    _elapsed_time_ms = os::elapsedTime() * 1000.0 - _elapsed_time_ms;
  }

  // returns the worker ID associated with this task.
  uint worker_id() { return _worker_id; }

  // From TerminatorTerminator. It determines whether this task should
  // exit the termination protocol after it's entered it.
  virtual bool should_exit_termination();

  // Resets the local region fields after a task has finished scanning a
  // region; or when they have become stale as a result of the region
  // being evacuated.
  void giveup_current_region();

  HeapWord* finger()            { return _finger; }

  bool has_aborted()            { return _has_aborted; }
  void set_has_aborted()        { _has_aborted = true; }
  void clear_has_aborted()      { _has_aborted = false; }
  bool has_timed_out()          { return _has_timed_out; }
  bool claimed()                { return _claimed; }

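  // Registers the closure applied to oops while scanning objects;
  // presumably a NULL argument clears the registration.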
  void set_cm_oop_closure(G1CMOopClosure* cm_oop_closure);

  // It grays the object by marking it and, if necessary, pushing it
  // on the local queue
  inline void deal_with_reference(oop obj);

  // It scans an object and visits its children.
  void scan_object(oop obj);

  // It pushes an object on the local queue.
  inline void push(oop obj);

  // These two move entries to/from the global stack.
  void move_entries_to_global_stack();
  void get_entries_from_global_stack();

  // It pops and scans objects from the local queue. If partially is
  // true, then it stops when the queue size drops below a given limit. If
  // partially is false, then it stops when the queue is empty.
  void drain_local_queue(bool partially);
  // It moves entries from the global stack to the local queue and
  // drains the local queue. If partially is true, then it stops when
  // both the global stack and the local queue reach a given size. If
  // partially is false, it tries to empty them totally.
  void drain_global_stack(bool partially);
  // It keeps picking SATB buffers and processing them until no SATB
  // buffers are available.
  void drain_satb_buffers();

  // moves the local finger to a new location
  inline void move_finger_to(HeapWord* new_finger) {
    assert(new_finger >= _finger && new_finger < _region_limit, "invariant");
    _finger = new_finger;
  }

  CMTask(uint worker_id, ConcurrentMark *cm,
         size_t* marked_bytes, BitMap* card_bm,
         CMTaskQueue* task_queue, CMTaskQueueSet* task_queues);

  // it prints statistics associated with this task
  void print_stats();

#if _MARKING_STATS_
  void increase_objs_found_on_bitmap() { ++_objs_found_on_bitmap; }
#endif // _MARKING_STATS_
};

// Class that's used to print out per-region liveness
// information. It's currently used at the end of marking and also
// after we sort the old regions at the end of the cleanup operation.
class G1PrintRegionLivenessInfoClosure: public HeapRegionClosure {
private:
  outputStream* _out;

  // Accumulators for these values.
  size_t _total_used_bytes;
  size_t _total_capacity_bytes;
  size_t _total_prev_live_bytes;
  size_t _total_next_live_bytes;

  // These are set up when we come across a "starts humongous" region
  // (as this is where most of this information is stored, not in the
  // subsequent "continues humongous" regions). After that, for every
  // region in a given humongous region series we deduce the right
  // values for it by simply subtracting the appropriate amount from
  // these fields. All these values should reach 0 after we've visited
  // the last region in the series.
  size_t _hum_used_bytes;
  size_t _hum_capacity_bytes;
  size_t _hum_prev_live_bytes;
  size_t _hum_next_live_bytes;

  // Accumulator for the remembered set size
  size_t _total_remset_bytes;

  // Accumulator for strong code roots memory size
  size_t _total_strong_code_roots_bytes;

  static double perc(size_t val, size_t total) {
    if (total == 0) {
      return 0.0;
    } else {
      return 100.0 * ((double) val / (double) total);
    }
  }

  static double bytes_to_mb(size_t val) {
    return (double) val / (double) M;
  }

  // See the .cpp file.
  size_t get_hum_bytes(size_t* hum_bytes);
  void get_hum_bytes(size_t* used_bytes, size_t* capacity_bytes,
                     size_t* prev_live_bytes, size_t* next_live_bytes);

public:
  // The header and footer are printed in the constructor and
  // destructor respectively.
  G1PrintRegionLivenessInfoClosure(outputStream* out, const char* phase_name);
  virtual bool doHeapRegion(HeapRegion* r);
  ~G1PrintRegionLivenessInfoClosure();
};

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_HPP