< prev index next >

src/hotspot/share/gc/g1/g1ConcurrentMark.hpp

Print this page
rev 57840 : imported patch 8215297-remove-ptt
rev 57842 : [mq]: 8238220-rename-owsttaskterminator


  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_GC_G1_G1CONCURRENTMARK_HPP
  26 #define SHARE_GC_G1_G1CONCURRENTMARK_HPP
  27 
  28 #include "gc/g1/g1ConcurrentMarkBitMap.hpp"
  29 #include "gc/g1/g1ConcurrentMarkObjArrayProcessor.hpp"
  30 #include "gc/g1/g1HeapVerifier.hpp"
  31 #include "gc/g1/g1RegionMarkStatsCache.hpp"
  32 #include "gc/g1/heapRegionSet.hpp"
  33 #include "gc/shared/owstTaskTerminator.hpp"
  34 #include "gc/shared/taskqueue.hpp"
  35 #include "gc/shared/verifyOption.hpp"
  36 #include "gc/shared/workgroup.hpp"
  37 #include "memory/allocation.hpp"
  38 #include "utilities/compilerWarnings.hpp"
  39 
  40 class ConcurrentGCTimer;
  41 class G1ConcurrentMarkThread;
  42 class G1CollectedHeap;
  43 class G1CMOopClosure;
  44 class G1CMTask;
  45 class G1ConcurrentMark;
  46 class G1OldTracer;
  47 class G1RegionToSpaceMapper;
  48 class G1SurvivorRegions;
  49 class ThreadClosure;
  50 
  51 PRAGMA_DIAG_PUSH
  52 // warning C4522: multiple assignment operators specified
  53 PRAGMA_DISABLE_MSVC_WARNING(4522)


 312   G1CMBitMap*             _next_mark_bitmap; // Under-construction mark bitmap
 313 
 314   // Heap bounds
 315   MemRegion const         _heap;
 316 
 317   // Root region tracking and claiming
 318   G1CMRootMemRegions         _root_regions;
 319 
 320   // For grey objects
 321   G1CMMarkStack           _global_mark_stack; // Grey objects behind global finger
 322   HeapWord* volatile      _finger;            // The global finger, region aligned,
 323                                               // always pointing to the end of the
 324                                               // last claimed region
 325 
 326   uint                    _worker_id_offset;
 327   uint                    _max_num_tasks;    // Maximum number of marking tasks
 328   uint                    _num_active_tasks; // Number of tasks currently active
 329   G1CMTask**              _tasks;            // Task queue array (max_worker_id length)
 330 
 331   G1CMTaskQueueSet*       _task_queues; // Task queue set
 332   OWSTTaskTerminator      _terminator;  // For termination
 333 
 334   // Two sync barriers that are used to synchronize tasks when an
 335   // overflow occurs. The algorithm is the following. All tasks enter
 336   // the first one to ensure that they have all stopped manipulating
 337   // the global data structures. After they exit it, they re-initialize
 338   // their data structures and task 0 re-initializes the global data
 339   // structures. Then, they enter the second sync barrier. This
  340   // ensures that no task starts doing work before all data
 341   // structures (local and global) have been re-initialized. When they
 342   // exit it, they are free to start working again.
 343   WorkGangBarrierSync     _first_overflow_barrier_sync;
 344   WorkGangBarrierSync     _second_overflow_barrier_sync;
 345 
 346   // This is set by any task, when an overflow on the global data
 347   // structures is detected
 348   volatile bool           _has_overflown;
 349   // True: marking is concurrent, false: we're in remark
 350   volatile bool           _concurrent;
 351   // Set at the end of a Full GC so that marking aborts
 352   volatile bool           _has_aborted;


 401   // marking or when marking completes (via set_non_marking_state below).
 402   void reset_marking_for_restart();
 403 
 404   // We do this after we're done with marking so that the marking data
 405   // structures are initialized to a sensible and predictable state.
 406   void reset_at_marking_complete();
 407 
 408   // Called to indicate how many threads are currently active.
 409   void set_concurrency(uint active_tasks);
 410 
 411   // Should be called to indicate which phase we're in (concurrent
 412   // mark or remark) and how many threads are currently active.
 413   void set_concurrency_and_phase(uint active_tasks, bool concurrent);
 414 
 415   // Prints all gathered CM-related statistics
 416   void print_stats();
 417 
  418   HeapWord*           finger()       { return _finger;   } // Global finger: region-aligned, points to the end of the last claimed region.
  419   bool                concurrent()   { return _concurrent; } // True while marking runs concurrently; false during remark.
  420   uint                active_tasks() { return _num_active_tasks; } // Number of marking tasks currently active.
  421   OWSTTaskTerminator* terminator()   { return &_terminator; } // Terminator used to coordinate marking-task termination.
 422 
 423   // Claims the next available region to be scanned by a marking
 424   // task/thread. It might return NULL if the next region is empty or
 425   // we have run out of regions. In the latter case, out_of_regions()
 426   // determines whether we've really run out of regions or the task
 427   // should call claim_region() again. This might seem a bit
 428   // awkward. Originally, the code was written so that claim_region()
 429   // either successfully returned with a non-empty region or there
 430   // were no more regions to be claimed. The problem with this was
 431   // that, in certain circumstances, it iterated over large chunks of
 432   // the heap finding only empty regions and, while it was working, it
  433   // was preventing the calling task from calling its regular clock
 434   // method. So, this way, each task will spend very little time in
 435   // claim_region() and is allowed to call the regular clock method
 436   // frequently.
 437   HeapRegion* claim_region(uint worker_id);
 438 
 439   // Determines whether we've run out of regions to scan. Note that
 440   // the finger can point past the heap end in case the heap was expanded
 441   // to satisfy an allocation without doing a GC. This is fine, because all




  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_GC_G1_G1CONCURRENTMARK_HPP
  26 #define SHARE_GC_G1_G1CONCURRENTMARK_HPP
  27 
  28 #include "gc/g1/g1ConcurrentMarkBitMap.hpp"
  29 #include "gc/g1/g1ConcurrentMarkObjArrayProcessor.hpp"
  30 #include "gc/g1/g1HeapVerifier.hpp"
  31 #include "gc/g1/g1RegionMarkStatsCache.hpp"
  32 #include "gc/g1/heapRegionSet.hpp"
  33 #include "gc/shared/taskTerminator.hpp"
  34 #include "gc/shared/taskqueue.hpp"
  35 #include "gc/shared/verifyOption.hpp"
  36 #include "gc/shared/workgroup.hpp"
  37 #include "memory/allocation.hpp"
  38 #include "utilities/compilerWarnings.hpp"
  39 
  40 class ConcurrentGCTimer;
  41 class G1ConcurrentMarkThread;
  42 class G1CollectedHeap;
  43 class G1CMOopClosure;
  44 class G1CMTask;
  45 class G1ConcurrentMark;
  46 class G1OldTracer;
  47 class G1RegionToSpaceMapper;
  48 class G1SurvivorRegions;
  49 class ThreadClosure;
  50 
  51 PRAGMA_DIAG_PUSH
  52 // warning C4522: multiple assignment operators specified
  53 PRAGMA_DISABLE_MSVC_WARNING(4522)


 312   G1CMBitMap*             _next_mark_bitmap; // Under-construction mark bitmap
 313 
 314   // Heap bounds
 315   MemRegion const         _heap;
 316 
 317   // Root region tracking and claiming
 318   G1CMRootMemRegions         _root_regions;
 319 
 320   // For grey objects
 321   G1CMMarkStack           _global_mark_stack; // Grey objects behind global finger
 322   HeapWord* volatile      _finger;            // The global finger, region aligned,
 323                                               // always pointing to the end of the
 324                                               // last claimed region
 325 
 326   uint                    _worker_id_offset;
 327   uint                    _max_num_tasks;    // Maximum number of marking tasks
 328   uint                    _num_active_tasks; // Number of tasks currently active
 329   G1CMTask**              _tasks;            // Task queue array (max_worker_id length)
 330 
 331   G1CMTaskQueueSet*       _task_queues; // Task queue set
 332   TaskTerminator          _terminator;  // For termination
 333 
 334   // Two sync barriers that are used to synchronize tasks when an
 335   // overflow occurs. The algorithm is the following. All tasks enter
 336   // the first one to ensure that they have all stopped manipulating
 337   // the global data structures. After they exit it, they re-initialize
 338   // their data structures and task 0 re-initializes the global data
 339   // structures. Then, they enter the second sync barrier. This
  340   // ensures that no task starts doing work before all data
 341   // structures (local and global) have been re-initialized. When they
 342   // exit it, they are free to start working again.
 343   WorkGangBarrierSync     _first_overflow_barrier_sync;
 344   WorkGangBarrierSync     _second_overflow_barrier_sync;
 345 
 346   // This is set by any task, when an overflow on the global data
 347   // structures is detected
 348   volatile bool           _has_overflown;
 349   // True: marking is concurrent, false: we're in remark
 350   volatile bool           _concurrent;
 351   // Set at the end of a Full GC so that marking aborts
 352   volatile bool           _has_aborted;


 401   // marking or when marking completes (via set_non_marking_state below).
 402   void reset_marking_for_restart();
 403 
 404   // We do this after we're done with marking so that the marking data
 405   // structures are initialized to a sensible and predictable state.
 406   void reset_at_marking_complete();
 407 
 408   // Called to indicate how many threads are currently active.
 409   void set_concurrency(uint active_tasks);
 410 
 411   // Should be called to indicate which phase we're in (concurrent
 412   // mark or remark) and how many threads are currently active.
 413   void set_concurrency_and_phase(uint active_tasks, bool concurrent);
 414 
 415   // Prints all gathered CM-related statistics
 416   void print_stats();
 417 
  418   HeapWord*           finger()       { return _finger;   } // Global finger: region-aligned, points to the end of the last claimed region.
  419   bool                concurrent()   { return _concurrent; } // True while marking runs concurrently; false during remark.
  420   uint                active_tasks() { return _num_active_tasks; } // Number of marking tasks currently active.
  421   TaskTerminator*     terminator()   { return &_terminator; } // Terminator used to coordinate marking-task termination.
 422 
 423   // Claims the next available region to be scanned by a marking
 424   // task/thread. It might return NULL if the next region is empty or
 425   // we have run out of regions. In the latter case, out_of_regions()
 426   // determines whether we've really run out of regions or the task
 427   // should call claim_region() again. This might seem a bit
 428   // awkward. Originally, the code was written so that claim_region()
 429   // either successfully returned with a non-empty region or there
 430   // were no more regions to be claimed. The problem with this was
 431   // that, in certain circumstances, it iterated over large chunks of
 432   // the heap finding only empty regions and, while it was working, it
  433   // was preventing the calling task from calling its regular clock
 434   // method. So, this way, each task will spend very little time in
 435   // claim_region() and is allowed to call the regular clock method
 436   // frequently.
 437   HeapRegion* claim_region(uint worker_id);
 438 
 439   // Determines whether we've run out of regions to scan. Note that
 440   // the finger can point past the heap end in case the heap was expanded
 441   // to satisfy an allocation without doing a GC. This is fine, because all


< prev index next >