1 /*
   2  * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_HPP
  26 #define SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_HPP
  27 
  28 #include "classfile/javaClasses.hpp"
  29 #include "gc_implementation/g1/heapRegionSet.hpp"
  30 #include "gc_implementation/shared/gcId.hpp"
  31 #include "utilities/taskqueue.hpp"
  32 
  33 class G1CollectedHeap;
  34 class CMTask;
  35 typedef GenericTaskQueue<oop, mtGC>            CMTaskQueue;
  36 typedef GenericTaskQueueSet<CMTaskQueue, mtGC> CMTaskQueueSet;
  37 
  38 // Closure used by CM during concurrent reference discovery
  39 // and reference processing (during remarking) to determine
  40 // if a particular object is alive. It is primarily used
  41 // to determine if referents of discovered reference objects
  42 // are alive. An instance is also embedded into the
  43 // reference processor as the _is_alive_non_header field
  44 class G1CMIsAliveClosure: public BoolObjectClosure {
  45   G1CollectedHeap* _g1;
  46  public:
  47   G1CMIsAliveClosure(G1CollectedHeap* g1) : _g1(g1) { }
  48 
  49   bool do_object_b(oop obj);
  50 };
  51 
  52 // A generic CM bit map.  This is essentially a wrapper around the BitMap
  53 // class, with one bit per (1<<_shifter) HeapWords.
  54 
  55 class CMBitMapRO VALUE_OBJ_CLASS_SPEC {
  56  protected:
  57   HeapWord* _bmStartWord;      // base address of range covered by map
  58   size_t    _bmWordSize;       // map size (in #HeapWords covered)
  59   const int _shifter;          // log2 of HeapWords covered by each bit
  60   VirtualSpace _virtual_space; // underlying memory for the bit map
  61   BitMap    _bm;               // the bit map itself
  62 
  63  public:
  64   // constructor
  65   CMBitMapRO(int shifter);
  66 
  67   enum { do_yield = true };
  68 
  69   // inquiries
  70   HeapWord* startWord()   const { return _bmStartWord; }
  71   size_t    sizeInWords() const { return _bmWordSize;  }
  72   // the following is one past the last word in space
  73   HeapWord* endWord()     const { return _bmStartWord + _bmWordSize; }
  74 
  75   // read marks
  76 
  77   bool isMarked(HeapWord* addr) const {
  78     assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
  79            "outside underlying space?");
  80     return _bm.at(heapWordToOffset(addr));
  81   }
  82 
  83   // iteration
  84   inline bool iterate(BitMapClosure* cl, MemRegion mr);
  85   inline bool iterate(BitMapClosure* cl);
  86 
  87   // Return the address corresponding to the next marked bit at or after
  88   // "addr", and before "limit", if "limit" is non-NULL.  If there is no
  89   // such bit, returns "limit" if that is non-NULL, or else "endWord()".
  90   HeapWord* getNextMarkedWordAddress(const HeapWord* addr,
  91                                      const HeapWord* limit = NULL) const;
  92   // Return the address corresponding to the next unmarked bit at or after
  93   // "addr", and before "limit", if "limit" is non-NULL.  If there is no
  94   // such bit, returns "limit" if that is non-NULL, or else "endWord()".
  95   HeapWord* getNextUnmarkedWordAddress(const HeapWord* addr,
  96                                        const HeapWord* limit = NULL) const;
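
       // A typical use of the two methods above (a sketch, not code from
       // this file): walking all marked addresses in [start, end) might be
       //   for (HeapWord* cur = getNextMarkedWordAddress(start, end);
       //        cur < end;
       //        cur = getNextMarkedWordAddress(cur + 1, end)) {
       //     // cur is the address of a marked HeapWord
       //   }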
  97 
  98   // conversion utilities
  99   HeapWord* offsetToHeapWord(size_t offset) const {
 100     return _bmStartWord + (offset << _shifter);
 101   }
 102   size_t heapWordToOffset(const HeapWord* addr) const {
 103     return pointer_delta(addr, _bmStartWord) >> _shifter;
 104   }
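
       // Worked example for the conversions above (a sketch; the shifter
       // value is hypothetical): with _shifter == 3 each bit covers 8
       // HeapWords, so an address 24 HeapWords past _bmStartWord maps to
       // bit offset 24 >> 3 == 3, and offsetToHeapWord(3) maps back to
       // _bmStartWord + (3 << 3).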
 105   int heapWordDiffToOffsetDiff(size_t diff) const;
 106 
 107   // The argument addr should be the start address of a valid object
 108   HeapWord* nextObject(HeapWord* addr) {
 109     oop obj = (oop) addr;
 110     HeapWord* res =  addr + obj->size();
 111     assert(offsetToHeapWord(heapWordToOffset(res)) == res, "sanity");
 112     return res;
 113   }
 114 
 115   void print_on_error(outputStream* st, const char* prefix) const;
 116 
 117   // debugging
 118   NOT_PRODUCT(bool covers(ReservedSpace rs) const;)
 119 };
 120 
 121 class CMBitMap : public CMBitMapRO {
 122 
 123  public:
 124   // constructor
 125   CMBitMap(int shifter) :
 126     CMBitMapRO(shifter) {}
 127 
 128   // Allocates the backing store for the marking bitmap
 129   bool allocate(ReservedSpace heap_rs);
 130 
 131   // write marks
 132   void mark(HeapWord* addr) {
 133     assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
 134            "outside underlying space?");
 135     _bm.set_bit(heapWordToOffset(addr));
 136   }
 137   void clear(HeapWord* addr) {
 138     assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
 139            "outside underlying space?");
 140     _bm.clear_bit(heapWordToOffset(addr));
 141   }
 142   bool parMark(HeapWord* addr) {
 143     assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
 144            "outside underlying space?");
 145     return _bm.par_set_bit(heapWordToOffset(addr));
 146   }
 147   bool parClear(HeapWord* addr) {
 148     assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
 149            "outside underlying space?");
 150     return _bm.par_clear_bit(heapWordToOffset(addr));
 151   }
 152   void markRange(MemRegion mr);
 153   void clearAll();
 154   void clearRange(MemRegion mr);
 155 
 156   // Starting at the bit corresponding to "addr" (inclusive), find the next
 157   // "1" bit, if any.  This bit starts some run of consecutive "1"'s; find
 158   // the end of this run (stopping at "end_addr").  Return the MemRegion
 159   // covering from the start of the region corresponding to the first bit
 160   // of the run to the end of the region corresponding to the last bit of
 161   // the run.  If there is no "1" bit at or after "addr", return an empty
 162   // MemRegion.
 163   MemRegion getAndClearMarkedRegion(HeapWord* addr, HeapWord* end_addr);
 164 };
 165 
 166 // Represents a marking stack used by ConcurrentMarking in the G1 collector.
 167 class CMMarkStack VALUE_OBJ_CLASS_SPEC {
 168   VirtualSpace _virtual_space;   // Underlying backing store for actual stack
 169   ConcurrentMark* _cm;
 170   oop* _base;        // bottom of stack
 171   jint _index;       // one more than last occupied index
 172   jint _capacity;    // max #elements
 173   jint _saved_index; // value of _index saved at start of GC
 174   NOT_PRODUCT(jint _max_depth;)   // max depth plumbed during run
 175 
 176   bool  _overflow;
 177   bool  _should_expand;
 178   DEBUG_ONLY(bool _drain_in_progress;)
 179   DEBUG_ONLY(bool _drain_in_progress_yields;)
 180 
 181  public:
 182   CMMarkStack(ConcurrentMark* cm);
 183   ~CMMarkStack();
 184 
 185 #ifndef PRODUCT
 186   jint max_depth() const {
 187     return _max_depth;
 188   }
 189 #endif
 190 
 191   bool allocate(size_t capacity);
 192 
 193   oop pop() {
 194     if (!isEmpty()) {
 195       return _base[--_index];
 196     }
 197     return NULL;
 198   }
 199 
 200   // If overflow happens, don't do the push, and record the overflow.
 201   // *Requires* that "ptr" is already marked.
 202   void push(oop ptr) {
 203     if (isFull()) {
 204       // Record overflow.
 205       _overflow = true;
 206       return;
 207     } else {
 208       _base[_index++] = ptr;
 209       NOT_PRODUCT(_max_depth = MAX2(_max_depth, _index));
 210     }
 211   }
 212   // Non-block impl.  Note: concurrency is allowed only with other
 213   // "par_push" operations, not with "pop" or "drain".  We would need
 214   // parallel versions of them if such concurrency was desired.
 215   void par_push(oop ptr);
 216 
 217   // Pushes the first "n" elements of "ptr_arr" on the stack.
 218   // Non-block impl.  Note: concurrency is allowed only with other
 219   // "par_adjoin_arr" or "push" operations, not with "pop" or "drain".
 220   void par_adjoin_arr(oop* ptr_arr, int n);
 221 
 222   // Pushes the first "n" elements of "ptr_arr" on the stack.
 223   // Locking impl: concurrency is allowed only with
 224   // "par_push_arr" and/or "par_pop_arr" operations, which use the same
 225   // locking strategy.
 226   void par_push_arr(oop* ptr_arr, int n);
 227 
 228   // If returns false, the array was empty.  Otherwise, removes up to "max"
 229   // elements from the stack, and transfers them to "ptr_arr" in an
 230   // unspecified order.  The actual number transferred is given in "n" ("n
 231   // == 0" is deliberately redundant with the return value.)  Locking impl:
 232   // concurrency is allowed only with "par_push_arr" and/or "par_pop_arr"
 233   // operations, which use the same locking strategy.
 234   bool par_pop_arr(oop* ptr_arr, int max, int* n);
 235 
 236   // Drain the mark stack, applying the given closure to all fields of
 237   // objects on the stack.  (That is, continue until the stack is empty,
 238   // even if closure applications add entries to the stack.)  The "bm"
 239   // argument, if non-null, may be used to verify that only marked objects
 240   // are on the mark stack.  If "yield_after" is "true", then the
 241   // concurrent marker performing the drain offers to yield after
 242   // processing each object.  If a yield occurs, stops the drain operation
 243   // and returns false.  Otherwise, returns true.
 244   template<bool nv, typename OopClosureClass>
 245   bool drain(OopClosureClass* cl, CMBitMap* bm, bool yield_after = false);
 246 
 247   bool isEmpty()    { return _index == 0; }
 248   bool isFull()     { return _index == _capacity; }
 249   int  maxElems()   { return _capacity; }
 250 
 251   bool overflow() { return _overflow; }
 252   void clear_overflow() { _overflow = false; }
 253 
 254   bool should_expand() const { return _should_expand; }
 255   void set_should_expand();
 256 
 257   // Expand the stack, typically in response to an overflow condition
 258   void expand();
 259 
 260   int  size() { return _index; }
 261 
 262   void setEmpty()   { _index = 0; clear_overflow(); }
 263 
 264   // Record the current index.
 265   void note_start_of_gc();
 266 
 267   // Make sure that we have not added any entries to the stack during GC.
 268   void note_end_of_gc();
 269 
 270   // iterate over the oops in the mark stack, up to the bound recorded via
 271   // the call above.
 272   void oops_do(OopClosure* f);
 273 };
 274 
 275 class ForceOverflowSettings VALUE_OBJ_CLASS_SPEC {
 276 private:
 277 #ifndef PRODUCT
 278   uintx _num_remaining;
 279   bool _force;
 280 #endif // !defined(PRODUCT)
 281 
 282 public:
 283   void init() PRODUCT_RETURN;
 284   void update() PRODUCT_RETURN;
 285   bool should_force() PRODUCT_RETURN_( return false; );
 286 };
 287 
 288 // this will enable a variety of different statistics per GC task
 289 #define _MARKING_STATS_       0
 290 // this will enable the higher verbose levels
 291 #define _MARKING_VERBOSE_     0
 292 
 293 #if _MARKING_STATS_
 294 #define statsOnly(statement)  \
 295 do {                          \
 296   statement ;                 \
 297 } while (0)
 298 #else // _MARKING_STATS_
 299 #define statsOnly(statement)  \
 300 do {                          \
 301 } while (0)
 302 #endif // _MARKING_STATS_
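
     // Example use (a sketch): statsOnly( ++_local_pushes ); expands to the
     // guarded statement when _MARKING_STATS_ is 1 and to an empty do/while
     // otherwise, so the argument disappears in normal builds.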
 303 
 304 typedef enum {
 305   no_verbose  = 0,   // verbose turned off
 306   stats_verbose,     // only prints stats at the end of marking
 307   low_verbose,       // low verbose, mostly per region and per major event
 308   medium_verbose,    // a bit more detailed than low
 309   high_verbose       // per object verbose
 310 } CMVerboseLevel;
 311 
 312 class YoungList;
 313 
 314 // Root Regions are regions that are not empty at the beginning of a
 315 // marking cycle and which we might collect during an evacuation pause
 316 // while the cycle is active. Given that, during evacuation pauses, we
 317 // do not copy objects that are explicitly marked, what we have to do
 318 // for the root regions is to scan them and mark all objects reachable
 319 // from them. According to the SATB assumptions, we only need to visit
 320 // each object once during marking. So, as long as we finish this scan
 321 // before the next evacuation pause, we can copy the objects from the
 322 // root regions without having to mark them or do anything else to them.
 323 //
 324 // Currently, we only support root region scanning once (at the start
 325 // of the marking cycle) and the root regions are all the survivor
 326 // regions populated during the initial-mark pause.
 327 class CMRootRegions VALUE_OBJ_CLASS_SPEC {
 328 private:
 329   YoungList*           _young_list;
 330   ConcurrentMark*      _cm;
 331 
 332   volatile bool        _scan_in_progress;
 333   volatile bool        _should_abort;
 334   HeapRegion* volatile _next_survivor;
 335 
 336 public:
 337   CMRootRegions();
 338   // We actually do most of the initialization in this method.
 339   void init(G1CollectedHeap* g1h, ConcurrentMark* cm);
 340 
 341   // Reset the claiming / scanning of the root regions.
 342   void prepare_for_scan();
 343 
 344   // Forces get_next() to return NULL so that the iteration aborts early.
 345   void abort() { _should_abort = true; }
 346 
 347   // Return true if CM threads are actively scanning root regions,
 348   // false otherwise.
 349   bool scan_in_progress() { return _scan_in_progress; }
 350 
 351   // Claim the next root region to scan atomically, or return NULL if
 352   // all have been claimed.
 353   HeapRegion* claim_next();
 354 
 355   // Flag that we're done with root region scanning and notify anyone
 356   // who's waiting on it. If aborted is false, assume that all regions
 357   // have been claimed.
 358   void scan_finished();
 359 
 360   // If CM threads are still scanning root regions, wait until they
 361   // are done. Return true if we had to wait, false otherwise.
 362   bool wait_until_scan_finished();
 363 };
 364 
 365 class ConcurrentMarkThread;
 366 
 367 class ConcurrentMark: public CHeapObj<mtGC> {
 368   friend class CMMarkStack;
 369   friend class ConcurrentMarkThread;
 370   friend class CMTask;
 371   friend class CMBitMapClosure;
 372   friend class CMGlobalObjectClosure;
 373   friend class CMRemarkTask;
 374   friend class CMConcurrentMarkingTask;
 375   friend class G1ParNoteEndTask;
 376   friend class CalcLiveObjectsClosure;
 377   friend class G1CMRefProcTaskProxy;
 378   friend class G1CMRefProcTaskExecutor;
 379   friend class G1CMKeepAliveAndDrainClosure;
 380   friend class G1CMDrainMarkingStackClosure;
 381 
 382 protected:
 383   ConcurrentMarkThread* _cmThread;   // The thread doing the work
 384   G1CollectedHeap*      _g1h;        // The heap
 385   uint                  _parallel_marking_threads; // The number of marking
 386                                                    // threads we're using
 387   uint                  _max_parallel_marking_threads; // Max number of marking
 388                                                        // threads we'll ever use
 389   double                _sleep_factor; // How much we have to sleep, with
 390                                        // respect to the work we just did, to
 391                                        // meet the marking overhead goal
 392   double                _marking_task_overhead; // Marking target overhead for
 393                                                 // a single task
 394 
 395   // Same as the two above, but for the cleanup task
 396   double                _cleanup_sleep_factor;
 397   double                _cleanup_task_overhead;
 398 
 399   FreeRegionList        _cleanup_list;
 400 
 401   // Concurrent marking support structures
 402   CMBitMap                _markBitMap1;
 403   CMBitMap                _markBitMap2;
 404   CMBitMapRO*             _prevMarkBitMap; // Completed mark bitmap
 405   CMBitMap*               _nextMarkBitMap; // Under-construction mark bitmap
 406 
 407   BitMap                  _region_bm;
 408   BitMap                  _card_bm;
 409 
 410   // Heap bounds
 411   HeapWord*               _heap_start;
 412   HeapWord*               _heap_end;
 413 
 414   // Root region tracking and claiming
 415   CMRootRegions           _root_regions;
 416 
 417   // For gray objects
 418   CMMarkStack             _markStack; // Gray objects behind global finger
 419   HeapWord* volatile      _finger;  // The global finger, region aligned,
 420                                     // always points to the end of the
 421                                     // last claimed region
 422 
 423   // Marking tasks
 424   uint                    _max_worker_id; // Maximum worker id
 425   uint                    _active_tasks;  // Number of tasks currently active
 426   CMTask**                _tasks;        // Task queue array (max_worker_id len)
 427   CMTaskQueueSet*         _task_queues;  // Task queue set
 428   ParallelTaskTerminator  _terminator;   // For termination
 429 
 430   // Two sync barriers that are used to synchronize tasks when an
 431   // overflow occurs. The algorithm is the following. All tasks enter
 432   // the first one to ensure that they have all stopped manipulating
 433   // the global data structures. After they exit it, they re-initialize
 434   // their data structures and task 0 re-initializes the global data
 435   // structures. Then, they enter the second sync barrier. This
 436   // ensures that no task starts doing work before all data
 437   // structures (local and global) have been re-initialized. When they
 438   // exit it, they are free to start working again.
 439   WorkGangBarrierSync     _first_overflow_barrier_sync;
 440   WorkGangBarrierSync     _second_overflow_barrier_sync;
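
       // Rough shape of that protocol from one task's point of view (a
       // sketch using enter_first_sync_barrier() / enter_second_sync_barrier()
       // declared further down; not code from this file):
       //   enter_first_sync_barrier(worker_id);   // all tasks quiesce
       //   // re-initialize local data; task 0 also resets the global data
       //   enter_second_sync_barrier(worker_id);  // nobody resumes early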
 441 
 442   // This is set by any task, when an overflow on the global data
 443   // structures is detected
 444   volatile bool           _has_overflown;
 445   // True: marking is concurrent, false: we're in remark
 446   volatile bool           _concurrent;
 447   // Set at the end of a Full GC so that marking aborts
 448   volatile bool           _has_aborted;
 449   GCId                    _aborted_gc_id;
 450 
 451   // Used when remark aborts due to an overflow to indicate that
 452   // another concurrent marking phase should start
 453   volatile bool           _restart_for_overflow;
 454 
 455   // This is true from the very start of concurrent marking until the
 456   // point when all the tasks complete their work. It is really used
 457   // to determine the period between the end of concurrent marking and
 458   // the time of remark.
 459   volatile bool           _concurrent_marking_in_progress;
 460 
 461   // Verbose level
 462   CMVerboseLevel          _verbose_level;
 463 
 464   // All of these times are in ms
 465   NumberSeq _init_times;
 466   NumberSeq _remark_times;
 467   NumberSeq   _remark_mark_times;
 468   NumberSeq   _remark_weak_ref_times;
 469   NumberSeq _cleanup_times;
 470   double    _total_counting_time;
 471   double    _total_rs_scrub_time;
 472 
 473   double*   _accum_task_vtime;   // Accumulated task vtime
 474 
 475   FlexibleWorkGang* _parallel_workers;
 476 
 477   ForceOverflowSettings _force_overflow_conc;
 478   ForceOverflowSettings _force_overflow_stw;
 479 
 480   void weakRefsWorkParallelPart(BoolObjectClosure* is_alive, bool purged_classes);
 481   void weakRefsWork(bool clear_all_soft_refs);
 482 
 483   void swapMarkBitMaps();
 484 
 485   // It resets the global marking data structures, as well as the
 486   // task local ones; should be called during initial mark.
 487   void reset();
 488 
 489   // Resets all the marking data structures. Called when we have to restart
 490   // marking or when marking completes (via set_non_marking_state below).
 491   void reset_marking_state(bool clear_overflow = true);
 492 
 493   // We do this after we're done with marking so that the marking data
 494   // structures are initialized to a sensible and predictable state.
 495   void set_non_marking_state();
 496 
 497   // Called to indicate how many threads are currently active.
 498   void set_concurrency(uint active_tasks);
 499 
 500   // It should be called to indicate which phase we're in (concurrent
 501   // mark or remark) and how many threads are currently active.
 502   void set_concurrency_and_phase(uint active_tasks, bool concurrent);
 503 
 504   // Prints all gathered CM-related statistics
 505   void print_stats();
 506 
 507   bool cleanup_list_is_empty() {
 508     return _cleanup_list.is_empty();
 509   }
 510 
 511   // Accessor methods
 512   uint parallel_marking_threads() const     { return _parallel_marking_threads; }
 513   uint max_parallel_marking_threads() const { return _max_parallel_marking_threads;}
 514   double sleep_factor()                     { return _sleep_factor; }
 515   double marking_task_overhead()            { return _marking_task_overhead;}
 516   double cleanup_sleep_factor()             { return _cleanup_sleep_factor; }
 517   double cleanup_task_overhead()            { return _cleanup_task_overhead;}
 518 
 519   bool use_parallel_marking_threads() const {
 520     assert(parallel_marking_threads() <=
 521            max_parallel_marking_threads(), "sanity");
 522     assert((_parallel_workers == NULL && parallel_marking_threads() == 0) ||
 523            parallel_marking_threads() > 0,
 524            "parallel workers not set up correctly");
 525     return _parallel_workers != NULL;
 526   }
 527 
 528   HeapWord*               finger()          { return _finger;   }
 529   bool                    concurrent()      { return _concurrent; }
 530   uint                    active_tasks()    { return _active_tasks; }
 531   ParallelTaskTerminator* terminator()      { return &_terminator; }
 532 
 533   // It claims the next available region to be scanned by a marking
 534   // task/thread. It might return NULL if the next region is empty or
 535   // we have run out of regions. In the latter case, out_of_regions()
 536   // determines whether we've really run out of regions or the task
 537   // should call claim_region() again. This might seem a bit
 538   // awkward. Originally, the code was written so that claim_region()
 539   // either successfully returned with a non-empty region or there
 540   // were no more regions to be claimed. The problem with this was
 541   // that, in certain circumstances, it iterated over large chunks of
 542   // the heap finding only empty regions and, while it was working, it
 543   // was preventing the calling task from calling its regular clock
 544   // method. So, this way, each task will spend very little time in
 545   // claim_region() and is allowed to call the regular clock method
 546   // frequently.
 547   HeapRegion* claim_region(uint worker_id);
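
       // The calling pattern this enables (a sketch of hypothetical task
       // code, not from this file; see out_of_regions() below):
       //   HeapRegion* hr = claim_region(worker_id);
       //   while (hr == NULL && !out_of_regions()) {
       //     // let the regular clock method run, then retry
       //     hr = claim_region(worker_id);
       //   }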
 548 
 549   // It determines whether we've run out of regions to scan. Note that
 550   // the finger can point past the heap end in case the heap was expanded
 551   // to satisfy an allocation without doing a GC. This is fine, because all
 552   // objects in those regions will be considered live anyway because of
 553   // SATB guarantees (i.e. their TAMS will be equal to bottom).
 554   bool        out_of_regions() { return _finger >= _heap_end; }
 555 
 556   // Returns the task with the given id
 557   CMTask* task(int id) {
 558     assert(0 <= id && id < (int) _active_tasks,
 559            "task id not within active bounds");
 560     return _tasks[id];
 561   }
 562 
 563   // Returns the task queue with the given id
 564   CMTaskQueue* task_queue(int id) {
 565     assert(0 <= id && id < (int) _active_tasks,
 566            "task queue id not within active bounds");
 567     return (CMTaskQueue*) _task_queues->queue(id);
 568   }
 569 
 570   // Returns the task queue set
 571   CMTaskQueueSet* task_queues()  { return _task_queues; }
 572 
 573   // Access / manipulation of the overflow flag which is set to
 574   // indicate that the global stack has overflown
 575   bool has_overflown()           { return _has_overflown; }
 576   void set_has_overflown()       { _has_overflown = true; }
 577   void clear_has_overflown()     { _has_overflown = false; }
 578   bool restart_for_overflow()    { return _restart_for_overflow; }
 579 
 580   // Methods to enter the two overflow sync barriers
 581   void enter_first_sync_barrier(uint worker_id);
 582   void enter_second_sync_barrier(uint worker_id);
 583 
 584   ForceOverflowSettings* force_overflow_conc() {
 585     return &_force_overflow_conc;
 586   }
 587 
 588   ForceOverflowSettings* force_overflow_stw() {
 589     return &_force_overflow_stw;
 590   }
 591 
 592   ForceOverflowSettings* force_overflow() {
 593     if (concurrent()) {
 594       return force_overflow_conc();
 595     } else {
 596       return force_overflow_stw();
 597     }
 598   }
 599 
 600   // Live Data Counting data structures...
 601   // These data structures are initialized at the start of
 602   // marking. They are written to while marking is active.
 603   // They are aggregated during remark; the aggregated values
 604   // are then used to populate the _region_bm, _card_bm, and
 605   // the total live bytes, which are then subsequently updated
 606   // during cleanup.
 607 
 608   // An array of bitmaps (one bit map per task). Each bitmap
 609   // is used to record the cards spanned by the live objects
 610   // marked by that task/worker.
 611   BitMap*  _count_card_bitmaps;
 612 
 613   // Used to record the number of marked live bytes
 614   // (for each region, by worker thread).
 615   size_t** _count_marked_bytes;
 616 
 617   // Card index of the bottom of the G1 heap. Used for biasing indices into
 618   // the card bitmaps.
 619   intptr_t _heap_bottom_card_num;
 620 
 621   // Set to true when initialization is complete
 622   bool _completed_initialization;
 623 
 624 public:
 625   // Manipulation of the global mark stack.
 626   // Notice that the first mark_stack_push is CAS-based, whereas the
 627   // two below are Mutex-based. This is OK since the first one is only
 628   // called during evacuation pauses and doesn't compete with the
 629   // other two (which are called by the marking tasks during
 630   // concurrent marking or remark).
 631   bool mark_stack_push(oop p) {
 632     _markStack.par_push(p);
 633     if (_markStack.overflow()) {
 634       set_has_overflown();
 635       return false;
 636     }
 637     return true;
 638   }
 639   bool mark_stack_push(oop* arr, int n) {
 640     _markStack.par_push_arr(arr, n);
 641     if (_markStack.overflow()) {
 642       set_has_overflown();
 643       return false;
 644     }
 645     return true;
 646   }
 647   void mark_stack_pop(oop* arr, int max, int* n) {
 648     _markStack.par_pop_arr(arr, max, n);
 649   }
 650   size_t mark_stack_size()                { return _markStack.size(); }
 651   size_t partial_mark_stack_size_target() { return _markStack.maxElems()/3; }
 652   bool mark_stack_overflow()              { return _markStack.overflow(); }
 653   bool mark_stack_empty()                 { return _markStack.isEmpty(); }
 654 
 655   CMRootRegions* root_regions() { return &_root_regions; }
 656 
 657   bool concurrent_marking_in_progress() {
 658     return _concurrent_marking_in_progress;
 659   }
 660   void set_concurrent_marking_in_progress() {
 661     _concurrent_marking_in_progress = true;
 662   }
 663   void clear_concurrent_marking_in_progress() {
 664     _concurrent_marking_in_progress = false;
 665   }
 666 
 667   void update_accum_task_vtime(int i, double vtime) {
 668     _accum_task_vtime[i] += vtime;
 669   }
 670 
 671   double all_task_accum_vtime() {
 672     double ret = 0.0;
 673     for (uint i = 0; i < _max_worker_id; ++i)
 674       ret += _accum_task_vtime[i];
 675     return ret;
 676   }
 677 
 678   // Attempts to steal an object from the task queues of other tasks
 679   bool try_stealing(uint worker_id, int* hash_seed, oop& obj) {
 680     return _task_queues->steal(worker_id, hash_seed, obj);
 681   }
 682 
 683   ConcurrentMark(G1CollectedHeap* g1h, ReservedSpace heap_rs);
 684   ~ConcurrentMark();
 685 
 686   ConcurrentMarkThread* cmThread() { return _cmThread; }
 687 
 688   CMBitMapRO* prevMarkBitMap() const { return _prevMarkBitMap; }
 689   CMBitMap*   nextMarkBitMap() const { return _nextMarkBitMap; }
 690 
 691   // Returns the number of GC threads to be used in a concurrent
 692   // phase based on the number of GC threads being used in a STW
 693   // phase.
 694   uint scale_parallel_threads(uint n_par_threads);
 695 
 696   // Calculates the number of GC threads to be used in a concurrent phase.
 697   uint calc_parallel_marking_threads();
 698 
 699   // The following three are interactions between CM and
 700   // G1CollectedHeap
 701 
 702   // This notifies CM that a root during initial-mark needs to be
 703   // grayed. It is MT-safe. word_size is the size of the object in
 704   // words. It is passed explicitly as sometimes we cannot calculate
 705   // it from the given object because it might be in an inconsistent
 706   // state (e.g., in to-space and being copied). So the caller is
 707   // responsible for dealing with this issue (e.g., get the size from
 708   // the from-space image when the to-space image might be
 709   // inconsistent) and always passing the size. hr is the region that
 710   // contains the object and it's passed optionally from callers who
 711   // might already have it (no point in recalculating it).
 712   inline void grayRoot(oop obj, size_t word_size,
 713                        uint worker_id, HeapRegion* hr = NULL);
 714 
 715   // It iterates over the heap and for each object it comes across it
 716   // will dump the contents of its reference fields, as well as
 717   // liveness information for the object and its referents. The dump
 718   // will be written to a file with the following name:
 719   // G1PrintReachableBaseFile + "." + str.
 720   // vo decides whether the prev (vo == UsePrevMarking), the next
 721   // (vo == UseNextMarking) marking information, or the mark word
 722   // (vo == UseMarkWord) will be used to determine the liveness of
 723   // each object / referent.
 724   // If all is true, all objects in the heap will be dumped, otherwise
 725   // only the live ones. In the dump the following symbols / abbreviations
 726   // are used:
 727   //   M : an explicitly live object (its bitmap bit is set)
 728   //   > : an implicitly live object (over tams)
 729   //   O : an object outside the G1 heap (typically: in the perm gen)
 730   //   NOT : a reference field whose referent is not live
 731   //   AND MARKED : indicates that an object is both explicitly and
 732   //   implicitly live (it should be one or the other, not both)
 733   void print_reachable(const char* str,
 734                        VerifyOption vo, bool all) PRODUCT_RETURN;
 735 
 736   // Clear the next marking bitmap (will be called concurrently).
 737   void clearNextBitmap();
 738 
 739   // Return whether the next mark bitmap has no marks set.
 740   bool nextMarkBitmapIsClear();
 741 
 742   // These two do the work that needs to be done before and after the
 743   // initial root checkpoint. Since this checkpoint can be done at two
 744   // different points (either an explicit pause or piggy-backed on a
 745   // young collection), it's nice to be able to easily share the
 746   // pre/post code. It might be the case that we can put everything in
 747   // the post method. TP
 748   void checkpointRootsInitialPre();
 749   void checkpointRootsInitialPost();
 750 
 751   // Scan all the root regions and mark everything reachable from
 752   // them.
 753   void scanRootRegions();
 754 
 755   // Scan a single root region and mark everything reachable from it.
 756   void scanRootRegion(HeapRegion* hr, uint worker_id);
 757 
 758   // Do concurrent phase of marking, to a tentative transitive closure.
 759   void markFromRoots();
 760 
 761   void checkpointRootsFinal(bool clear_all_soft_refs);
 762   void checkpointRootsFinalWork();
 763   void cleanup();
 764   void completeCleanup();
 765 
 766   // Mark in the previous bitmap.  NB: this is usually read-only, so use
 767   // this carefully!
 768   inline void markPrev(oop p);
 769 
 770   // Clears marks for all objects in the given range, for the prev,
 771   // next, or both bitmaps.  NB: the previous bitmap is usually
 772   // read-only, so use this carefully!
 773   void clearRangePrevBitmap(MemRegion mr);
 774   void clearRangeNextBitmap(MemRegion mr);
 775   void clearRangeBothBitmaps(MemRegion mr);
 776 
 777   // Notify data structures that a GC has started.
 778   void note_start_of_gc() {
 779     _markStack.note_start_of_gc();
 780   }
 781 
 782   // Notify data structures that a GC is finished.
 783   void note_end_of_gc() {
 784     _markStack.note_end_of_gc();
 785   }
 786 
 787   // Verify that there are no CSet oops on the stacks (taskqueues /
 788   // global mark stack), enqueued SATB buffers, per-thread SATB
 789   // buffers, and fingers (global / per-task). The boolean parameters
 790   // decide which of the above data structures to verify. If marking
 791   // is not in progress, it's a no-op.
 792   void verify_no_cset_oops(bool verify_stacks,
 793                            bool verify_enqueued_buffers,
 794                            bool verify_thread_buffers,
 795                            bool verify_fingers) PRODUCT_RETURN;
 796 
 797   // It is called at the end of an evacuation pause during marking so
 798   // that CM is notified of where the new end of the heap is. It
 799   // doesn't do anything if concurrent_marking_in_progress() is false,
 800   // unless the force parameter is true.
 801   void update_g1_committed(bool force = false);
 802 
 803   bool isMarked(oop p) const {
 804     assert(p != NULL && p->is_oop(), "expected an oop");
 805     HeapWord* addr = (HeapWord*)p;
 806     assert(addr >= _nextMarkBitMap->startWord() &&
 807            addr < _nextMarkBitMap->endWord(), "in a region");
 808 
 809     return _nextMarkBitMap->isMarked(addr);
 810   }
 811 
 812   inline bool not_yet_marked(oop p) const;
 813 
 814   // XXX Debug code
 815   bool containing_card_is_marked(void* p);
 816   bool containing_cards_are_marked(void* start, void* last);
 817 
 818   bool isPrevMarked(oop p) const {
 819     assert(p != NULL && p->is_oop(), "expected an oop");
 820     HeapWord* addr = (HeapWord*)p;
 821     assert(addr >= _prevMarkBitMap->startWord() &&
 822            addr < _prevMarkBitMap->endWord(), "in a region");
 823 
 824     return _prevMarkBitMap->isMarked(addr);
 825   }
 826 
 827   inline bool do_yield_check(uint worker_i = 0);
 828 
 829   // Called to abort the marking cycle after a Full GC takes place.
 830   void abort();
 831 
 832   bool has_aborted()      { return _has_aborted; }
 833 
 834   const GCId& concurrent_gc_id();
 835 
 836   // This prints the global/local fingers. It is used for debugging.
 837   NOT_PRODUCT(void print_finger();)
 838 
 839   void print_summary_info();
 840 
 841   void print_worker_threads_on(outputStream* st) const;
 842 
 843   void print_on_error(outputStream* st) const;
 844 
 845   // The following indicate whether a given verbose level has been
 846   // set. Notice that anything above stats is conditional on
 847   // _MARKING_VERBOSE_ having been set to 1
 848   bool verbose_stats() {
 849     return _verbose_level >= stats_verbose;
 850   }
 851   bool verbose_low() {
 852     return _MARKING_VERBOSE_ && _verbose_level >= low_verbose;
 853   }
 854   bool verbose_medium() {
 855     return _MARKING_VERBOSE_ && _verbose_level >= medium_verbose;
 856   }
 857   bool verbose_high() {
 858     return _MARKING_VERBOSE_ && _verbose_level >= high_verbose;
 859   }
 860 
 861   // Liveness counting
 862 
 863   // Utility routine to set an exclusive range of cards on the given
 864   // card liveness bitmap
 865   inline void set_card_bitmap_range(BitMap* card_bm,
 866                                     BitMap::idx_t start_idx,
 867                                     BitMap::idx_t end_idx,
 868                                     bool is_par);
 869 
 870   // Returns the card number of the bottom of the G1 heap.
 871   // Used in biasing indices into accounting card bitmaps.
 872   intptr_t heap_bottom_card_num() const {
 873     return _heap_bottom_card_num;
 874   }
 875 
 876   // Returns the card bitmap for a given task or worker id.
 877   BitMap* count_card_bitmap_for(uint worker_id) {
 878     assert(0 <= worker_id && worker_id < _max_worker_id, "oob");
 879     assert(_count_card_bitmaps != NULL, "uninitialized");
 880     BitMap* task_card_bm = &_count_card_bitmaps[worker_id];
 881     assert(task_card_bm->size() == _card_bm.size(), "size mismatch");
 882     return task_card_bm;
 883   }
 884 
 885   // Returns the array containing the marked bytes for each region,
 886   // for the given worker or task id.
 887   size_t* count_marked_bytes_array_for(uint worker_id) {
 888     assert(0 <= worker_id && worker_id < _max_worker_id, "oob");
 889     assert(_count_marked_bytes != NULL, "uninitialized");
 890     size_t* marked_bytes_array = _count_marked_bytes[worker_id];
 891     assert(marked_bytes_array != NULL, "uninitialized");
 892     return marked_bytes_array;
 893   }
 894 
 895   // Returns the index in the liveness accounting card table bitmap
 896   // for the given address
 897   inline BitMap::idx_t card_bitmap_index_for(HeapWord* addr);
 898 
 899   // Counts the size of the given memory region in the given
 900   // marked_bytes array slot for the given HeapRegion.
 901   // Sets the bits in the given card bitmap that are associated with the
 902   // cards that are spanned by the memory region.
 903   inline void count_region(MemRegion mr, HeapRegion* hr,
 904                            size_t* marked_bytes_array,
 905                            BitMap* task_card_bm);
 906 
 907   // Counts the given memory region in the task/worker counting
 908   // data structures for the given worker id.
 909   inline void count_region(MemRegion mr, HeapRegion* hr, uint worker_id);
 910 
 911   // Counts the given memory region in the task/worker counting
 912   // data structures for the given worker id.
 913   inline void count_region(MemRegion mr, uint worker_id);
 914 
 915   // Counts the given object in the given task/worker counting
 916   // data structures.
 917   inline void count_object(oop obj, HeapRegion* hr,
 918                            size_t* marked_bytes_array,
 919                            BitMap* task_card_bm);
 920 
 921   // Counts the given object in the task/worker counting data
 922   // structures for the given worker id.
 923   inline void count_object(oop obj, HeapRegion* hr, uint worker_id);
 924 
 925   // Attempts to mark the given object and, if successful, counts
 926   // the object in the given task/worker counting structures.
 927   inline bool par_mark_and_count(oop obj, HeapRegion* hr,
 928                                  size_t* marked_bytes_array,
 929                                  BitMap* task_card_bm);
 930 
 931   // Attempts to mark the given object and, if successful, counts
 932   // the object in the task/worker counting structures for the
 933   // given worker id.
 934   inline bool par_mark_and_count(oop obj, size_t word_size,
 935                                  HeapRegion* hr, uint worker_id);
 936 
 937   // Attempts to mark the given object and, if successful, counts
 938   // the object in the task/worker counting structures for the
 939   // given worker id.
 940   inline bool par_mark_and_count(oop obj, HeapRegion* hr, uint worker_id);
 941 
 942   // Similar to the above routine but we don't know the heap region that
 943   // contains the object to be marked/counted, which this routine looks up.
 944   inline bool par_mark_and_count(oop obj, uint worker_id);
 945 
 946   // Similar to the above routine but there are times when we cannot
 947   // safely calculate the size of obj due to races and we, therefore,
 948   // pass the size in as a parameter. It is the caller's responsibility
 949   // to ensure that the size passed in for obj is valid.
 950   inline bool par_mark_and_count(oop obj, size_t word_size, uint worker_id);
 951 
 952   // Unconditionally mark the given object, and unconditionally count
 953   // the object in the counting structures for worker id 0.
 954   // Should *not* be called from parallel code.
 955   inline bool mark_and_count(oop obj, HeapRegion* hr);
 956 
 957   // Similar to the above routine but we don't know the heap region that
 958   // contains the object to be marked/counted, which this routine looks up.
 959   // Should *not* be called from parallel code.
 960   inline bool mark_and_count(oop obj);
 961 
 962   // Returns true if initialization was successfully completed.
 963   bool completed_initialization() const {
 964     return _completed_initialization;
 965   }
 966 
 967 protected:
 968   // Clear all the per-task bitmaps and arrays used to store the
 969   // counting data.
 970   void clear_all_count_data();
 971 
 972   // Aggregates the counting data for each worker/task
 973   // that was constructed while marking. Also sets
 974   // the amount of marked bytes for each region and
 975   // the top at concurrent mark count.
 976   void aggregate_count_data();
 977 
 978   // Verification routine
 979   void verify_count_data();
 980 };
 981 
 982 // A class representing a marking task.
 983 class CMTask : public TerminatorTerminator {
 984 private:
 985   enum PrivateConstants {
 986     // the regular clock call is called once the number of scanned
 987     // words reaches this limit
 988     words_scanned_period          = 12*1024,
 989     // the regular clock call is called once the number of visited
 990     // references reaches this limit
 991     refs_reached_period           = 384,
 992     // initial value for the hash seed, used in the work stealing code
 993     init_hash_seed                = 17,
 994     // how many entries will be transferred between global stack and
 995     // local queues
 996     global_stack_transfer_size    = 16
 997   };
 998 
 999   uint                        _worker_id;
1000   G1CollectedHeap*            _g1h;
1001   ConcurrentMark*             _cm;
1002   CMBitMap*                   _nextMarkBitMap;
1003   // the task queue of this task
1004   CMTaskQueue*                _task_queue;
1005 private:
1006   // the task queue set---needed for stealing
1007   CMTaskQueueSet*             _task_queues;
1008   // indicates whether the task has been claimed---this is only for
1009   // debugging purposes
1010   bool                        _claimed;
1011 
1012   // number of calls to this task
1013   int                         _calls;
1014 
1015   // when the virtual timer reaches this time, the marking step should
1016   // exit
1017   double                      _time_target_ms;
1018   // the start time of the current marking step
1019   double                      _start_time_ms;
1020 
1021   // the oop closure used for iterations over oops
1022   G1CMOopClosure*             _cm_oop_closure;
1023 
1024   // the region this task is scanning, NULL if we're not scanning any
1025   HeapRegion*                 _curr_region;
1026   // the local finger of this task, NULL if we're not scanning a region
1027   HeapWord*                   _finger;
1028   // limit of the region this task is scanning, NULL if we're not scanning one
1029   HeapWord*                   _region_limit;
1030 
1031   // the number of words this task has scanned
1032   size_t                      _words_scanned;
1033   // When _words_scanned reaches this limit, the regular clock is
1034   // called. Notice that this might be decreased under certain
1035   // circumstances (i.e. when we believe that we did an expensive
1036   // operation).
1037   size_t                      _words_scanned_limit;
1038   // the initial value of _words_scanned_limit (i.e. what it was
1039   // before it was decreased).
1040   size_t                      _real_words_scanned_limit;
1041 
1042   // the number of references this task has visited
1043   size_t                      _refs_reached;
1044   // When _refs_reached reaches this limit, the regular clock is
1045   // called. Notice that this might be decreased under certain
1046   // circumstances (i.e. when we believe that we did an expensive
1047   // operation).
1048   size_t                      _refs_reached_limit;
1049   // the initial value of _refs_reached_limit (i.e. what it was before
1050   // it was decreased).
1051   size_t                      _real_refs_reached_limit;
1052 
1053   // used by the work stealing stuff
1054   int                         _hash_seed;
1055   // if this is true, then the task has aborted for some reason
1056   bool                        _has_aborted;
1057   // set when the task aborts because it has met its time quota
1058   bool                        _has_timed_out;
1059   // true when we're draining SATB buffers; this avoids the task
1060   // aborting due to SATB buffers being available (as we're already
1061   // dealing with them)
1062   bool                        _draining_satb_buffers;
1063 
1064   // number sequence of past step times
1065   NumberSeq                   _step_times_ms;
1066   // elapsed time of this task
1067   double                      _elapsed_time_ms;
1068   // termination time of this task
1069   double                      _termination_time_ms;
1070   // when this task got into the termination protocol
1071   double                      _termination_start_time_ms;
1072 
1073   // true when the task is during a concurrent phase, false when it is
1074   // in the remark phase (so, in the latter case, we do not have to
1075   // check all the things that we have to check during the concurrent
1076   // phase, i.e. SATB buffer availability...)
1077   bool                        _concurrent;
1078 
1079   TruncatedSeq                _marking_step_diffs_ms;
1080 
1081   // Counting data structures. Embedding the task's marked_bytes_array
1082   // and card bitmap into the actual task saves having to go through
1083   // the ConcurrentMark object.
1084   size_t*                     _marked_bytes_array;
1085   BitMap*                     _card_bm;
1086 
1087   // LOTS of statistics related with this task
1088 #if _MARKING_STATS_
1089   NumberSeq                   _all_clock_intervals_ms;
1090   double                      _interval_start_time_ms;
1091 
1092   int                         _aborted;
1093   int                         _aborted_overflow;
1094   int                         _aborted_cm_aborted;
1095   int                         _aborted_yield;
1096   int                         _aborted_timed_out;
1097   int                         _aborted_satb;
1098   int                         _aborted_termination;
1099 
1100   int                         _steal_attempts;
1101   int                         _steals;
1102 
1103   int                         _clock_due_to_marking;
1104   int                         _clock_due_to_scanning;
1105 
1106   int                         _local_pushes;
1107   int                         _local_pops;
1108   int                         _local_max_size;
1109   int                         _objs_scanned;
1110 
1111   int                         _global_pushes;
1112   int                         _global_pops;
1113   int                         _global_max_size;
1114 
1115   int                         _global_transfers_to;
1116   int                         _global_transfers_from;
1117 
1118   int                         _regions_claimed;
1119   int                         _objs_found_on_bitmap;
1120 
1121   int                         _satb_buffers_processed;
1122 #endif // _MARKING_STATS_
1123 
1124   // it updates the local fields after this task has claimed
1125   // a new region to scan
1126   void setup_for_region(HeapRegion* hr);
1127   // it brings up-to-date the limit of the region
1128   void update_region_limit();
1129 
1130   // called when either the words scanned or the refs visited limit
1131   // has been reached
1132   void reached_limit();
1133   // recalculates the words scanned and refs visited limits
1134   void recalculate_limits();
1135   // decreases the words scanned and refs visited limits when we reach
1136   // an expensive operation
1137   void decrease_limits();
1138   // it checks whether the words scanned or refs visited reached their
1139   // respective limit and calls reached_limit() if they have
1140   void check_limits() {
1141     if (_words_scanned >= _words_scanned_limit ||
1142         _refs_reached >= _refs_reached_limit) {
1143       reached_limit();
1144     }
1145   }
1146   // this is supposed to be called regularly during a marking step as
1147   // it checks a bunch of conditions that might cause the marking step
1148   // to abort
1149   void regular_clock_call();
1150   bool concurrent() { return _concurrent; }
1151 
1152 public:
1153   // It resets the task; it should be called right at the beginning of
1154   // a marking phase.
1155   void reset(CMBitMap* _nextMarkBitMap);
1156   // it clears all the fields that correspond to a claimed region.
1157   void clear_region_fields();
1158 
1159   void set_concurrent(bool concurrent) { _concurrent = concurrent; }
1160 
1161   // The main method of this class which performs a marking step
1162   // trying not to exceed the given duration. However, it might exit
1163   // prematurely, according to some conditions (i.e. SATB buffers are
1164   // available for processing).
1165   void do_marking_step(double target_ms,
1166                        bool do_termination,
1167                        bool is_serial);
1168 
1169   // These two calls start and stop the timer
1170   void record_start_time() {
1171     _elapsed_time_ms = os::elapsedTime() * 1000.0;
1172   }
1173   void record_end_time() {
1174     _elapsed_time_ms = os::elapsedTime() * 1000.0 - _elapsed_time_ms;
1175   }
1176 
1177   // returns the worker ID associated with this task.
1178   uint worker_id() { return _worker_id; }
1179 
1180   // From TerminatorTerminator. It determines whether this task should
1181   // exit the termination protocol after it's entered it.
1182   virtual bool should_exit_termination();
1183 
1184   // Resets the local region fields after a task has finished scanning a
1185   // region; or when they have become stale as a result of the region
1186   // being evacuated.
1187   void giveup_current_region();
1188 
1189   HeapWord* finger()            { return _finger; }
1190 
1191   bool has_aborted()            { return _has_aborted; }
1192   void set_has_aborted()        { _has_aborted = true; }
1193   void clear_has_aborted()      { _has_aborted = false; }
1194   bool has_timed_out()          { return _has_timed_out; }
1195   bool claimed()                { return _claimed; }
1196 
1197   void set_cm_oop_closure(G1CMOopClosure* cm_oop_closure);
1198 
1199   // It grays the object by marking it and, if necessary, pushing it
1200   // on the local queue
1201   inline void deal_with_reference(oop obj);
1202 
1203   // It scans an object and visits its children.
1204   void scan_object(oop obj);
1205 
1206   // It pushes an object on the local queue.
1207   inline void push(oop obj);
1208 
1209   // These two move entries to/from the global stack.
1210   void move_entries_to_global_stack();
1211   void get_entries_from_global_stack();
1212 
1213   // It pops and scans objects from the local queue. If partially is
1214   // true, then it stops once the queue size drops to a given limit. If
1215   // partially is false, then it stops when the queue is empty.
1216   void drain_local_queue(bool partially);
1217   // It moves entries from the global stack to the local queue and
1218   // drains the local queue. If partially is true, then it stops when
1219   // both the global stack and the local queue reach a given size. If
1220   // partially is false, it tries to empty them totally.
1221   void drain_global_stack(bool partially);
1222   // It keeps picking SATB buffers and processing them until no SATB
1223   // buffers are available.
1224   void drain_satb_buffers();
1225 
1226   // moves the local finger to a new location
1227   inline void move_finger_to(HeapWord* new_finger) {
1228     assert(new_finger >= _finger && new_finger < _region_limit, "invariant");
1229     _finger = new_finger;
1230   }
1231 
1232   CMTask(uint worker_id, ConcurrentMark *cm,
1233          size_t* marked_bytes, BitMap* card_bm,
1234          CMTaskQueue* task_queue, CMTaskQueueSet* task_queues);
1235 
1236   // it prints statistics associated with this task
1237   void print_stats();
1238 
1239 #if _MARKING_STATS_
1240   void increase_objs_found_on_bitmap() { ++_objs_found_on_bitmap; }
1241 #endif // _MARKING_STATS_
1242 };
1243 
1244 // Class that's used to print out per-region liveness
1245 // information. It's currently used at the end of marking and also
1246 // after we sort the old regions at the end of the cleanup operation.
1247 class G1PrintRegionLivenessInfoClosure: public HeapRegionClosure {
1248 private:
1249   outputStream* _out;
1250 
1251   // Accumulators for these values.
1252   size_t _total_used_bytes;
1253   size_t _total_capacity_bytes;
1254   size_t _total_prev_live_bytes;
1255   size_t _total_next_live_bytes;
1256 
1257   // These are set up when we come across a "starts humongous" region
1258   // (as this is where most of this information is stored, not in the
1259   // subsequent "continues humongous" regions). After that, for every
1260   // region in a given humongous region series we deduce the right
1261   // values for it by simply subtracting the appropriate amount from
1262   // these fields. All these values should reach 0 after we've visited
1263   // the last region in the series.
1264   size_t _hum_used_bytes;
1265   size_t _hum_capacity_bytes;
1266   size_t _hum_prev_live_bytes;
1267   size_t _hum_next_live_bytes;
1268 
1269   // Accumulator for the remembered set size
1270   size_t _total_remset_bytes;
1271 
1272   // Accumulator for strong code roots memory size
1273   size_t _total_strong_code_roots_bytes;
1274 
1275   static double perc(size_t val, size_t total) {
1276     if (total == 0) {
1277       return 0.0;
1278     } else {
1279       return 100.0 * ((double) val / (double) total);
1280     }
1281   }
1282 
1283   static double bytes_to_mb(size_t val) {
1284     return (double) val / (double) M;
1285   }
1286 
1287   // See the .cpp file.
1288   size_t get_hum_bytes(size_t* hum_bytes);
1289   void get_hum_bytes(size_t* used_bytes, size_t* capacity_bytes,
1290                      size_t* prev_live_bytes, size_t* next_live_bytes);
1291 
1292 public:
1293   // The header and footer are printed in the constructor and
1294   // destructor respectively.
1295   G1PrintRegionLivenessInfoClosure(outputStream* out, const char* phase_name);
1296   virtual bool doHeapRegion(HeapRegion* r);
1297   ~G1PrintRegionLivenessInfoClosure();
1298 };
1299 
1300 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_HPP