src/hotspot/share/gc/g1/g1ConcurrentMark.hpp

 306   G1CMBitMap*             _next_mark_bitmap; // Under-construction mark bitmap
 307 
 308   // Heap bounds
 309   MemRegion const         _heap;
 310 
 311   // Root region tracking and claiming
 312   G1CMRootRegions         _root_regions;
 313 
 314   // For grey objects
 315   G1CMMarkStack           _global_mark_stack; // Grey objects behind global finger
 316   HeapWord* volatile      _finger;            // The global finger, region aligned,
 317                                               // always pointing to the end of the
 318                                               // last claimed region
 319 
 320   uint                    _worker_id_offset;
 321   uint                    _max_num_tasks;    // Maximum number of marking tasks
 322   uint                    _num_active_tasks; // Number of tasks currently active
 323   G1CMTask**              _tasks;            // Task array (max_worker_id length)
 324 
 325   G1CMTaskQueueSet*       _task_queues;      // Task queue set
 326   ParallelTaskTerminator  _terminator;       // For termination
 327 
 328   // Two sync barriers that are used to synchronize tasks when an
 329   // overflow occurs. The algorithm is the following. All tasks enter
 330   // the first one to ensure that they have all stopped manipulating
 331   // the global data structures. After they exit it, they re-initialize
 332   // their data structures and task 0 re-initializes the global data
 333   // structures. Then, they enter the second sync barrier. This
 334   // ensures that no task starts doing work before all data
 335   // structures (local and global) have been re-initialized. When they
 336   // exit it, they are free to start working again.
 337   WorkGangBarrierSync     _first_overflow_barrier_sync;
 338   WorkGangBarrierSync     _second_overflow_barrier_sync;
 339 
 340   // This is set by any task when an overflow on the global data
 341   // structures is detected
 342   volatile bool           _has_overflown;
 343   // True: marking is concurrent, false: we're in remark
 344   volatile bool           _concurrent;
 345   // Set at the end of a Full GC so that marking aborts
 346   volatile bool           _has_aborted;


 395   // marking or when marking completes (via set_non_marking_state below).
 396   void reset_marking_for_restart();
 397 
 398   // We do this after we're done with marking so that the marking data
 399   // structures are initialized to a sensible and predictable state.
 400   void reset_at_marking_complete();
 401 
 402   // Called to indicate how many threads are currently active.
 403   void set_concurrency(uint active_tasks);
 404 
 405   // Should be called to indicate which phase we're in (concurrent
 406   // mark or remark) and how many threads are currently active.
 407   void set_concurrency_and_phase(uint active_tasks, bool concurrent);
 408 
 409   // Prints all gathered CM-related statistics
 410   void print_stats();
 411 
 412   HeapWord*               finger()          { return _finger;   }
 413   bool                    concurrent()      { return _concurrent; }
 414   uint                    active_tasks()    { return _num_active_tasks; }
 415   ParallelTaskTerminator* terminator()      { return &_terminator; }
 416 
 417   // Claims the next available region to be scanned by a marking
 418   // task/thread. It might return NULL if the next region is empty or
 419   // we have run out of regions. In the latter case, out_of_regions()
 420   // determines whether we've really run out of regions or the task
 421   // should call claim_region() again. This might seem a bit
 422   // awkward. Originally, the code was written so that claim_region()
 423   // either successfully returned with a non-empty region or there
 424   // were no more regions to be claimed. The problem with this was
 425   // that, in certain circumstances, it iterated over large chunks of
 426   // the heap finding only empty regions and, while it was working, it
 427   // was preventing the calling task from calling its regular clock
 428   // method. So, this way, each task will spend very little time in
 429   // claim_region() and is allowed to call the regular clock method
 430   // frequently.
 431   HeapRegion* claim_region(uint worker_id);
 432 
 433   // Determines whether we've run out of regions to scan. Note that
 434   // the finger can point past the heap end in case the heap was expanded
 435   // to satisfy an allocation without doing a GC. This is fine, because all




 306   G1CMBitMap*             _next_mark_bitmap; // Under-construction mark bitmap
 307 
 308   // Heap bounds
 309   MemRegion const         _heap;
 310 
 311   // Root region tracking and claiming
 312   G1CMRootRegions         _root_regions;
 313 
 314   // For grey objects
 315   G1CMMarkStack           _global_mark_stack; // Grey objects behind global finger
 316   HeapWord* volatile      _finger;            // The global finger, region aligned,
 317                                               // always pointing to the end of the
 318                                               // last claimed region
 319 
 320   uint                    _worker_id_offset;
 321   uint                    _max_num_tasks;    // Maximum number of marking tasks
 322   uint                    _num_active_tasks; // Number of tasks currently active
 323   G1CMTask**              _tasks;            // Task array (max_worker_id length)
 324 
 325   G1CMTaskQueueSet*       _task_queues; // Task queue set
 326   TaskTerminator          _terminator;  // For termination
 327 
 328   // Two sync barriers that are used to synchronize tasks when an
 329   // overflow occurs. The algorithm is the following. All tasks enter
 330   // the first one to ensure that they have all stopped manipulating
 331   // the global data structures. After they exit it, they re-initialize
 332   // their data structures and task 0 re-initializes the global data
 333   // structures. Then, they enter the second sync barrier. This
 334   // ensures that no task starts doing work before all data
 335   // structures (local and global) have been re-initialized. When they
 336   // exit it, they are free to start working again.
 337   WorkGangBarrierSync     _first_overflow_barrier_sync;
 338   WorkGangBarrierSync     _second_overflow_barrier_sync;
 339 
 340   // This is set by any task when an overflow on the global data
 341   // structures is detected
 342   volatile bool           _has_overflown;
 343   // True: marking is concurrent, false: we're in remark
 344   volatile bool           _concurrent;
 345   // Set at the end of a Full GC so that marking aborts
 346   volatile bool           _has_aborted;
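
The overflow-barrier comment above describes a two-phase restart protocol: the first barrier guarantees that every task has stopped manipulating the global structures before anything is reset; each task then re-initializes its own state while task 0 alone resets the shared state; the second barrier keeps any task from resuming work before all of that re-initialization is complete. Below is a minimal, self-contained sketch of the same pattern using C++20 std::barrier in place of WorkGangBarrierSync; all names here are illustrative, not HotSpot's.

#include <barrier>
#include <cstdio>
#include <thread>
#include <vector>

constexpr int kNumTasks = 4;

// Stand-ins for _first_overflow_barrier_sync / _second_overflow_barrier_sync.
std::barrier<> first_overflow_barrier(kNumTasks);
std::barrier<> second_overflow_barrier(kNumTasks);

void restart_after_overflow(int worker_id) {
  // Barrier 1: after this point, no task is still touching global state.
  first_overflow_barrier.arrive_and_wait();

  std::printf("task %d: re-initialize local structures\n", worker_id);
  if (worker_id == 0) {
    // Exactly one task resets the shared structures, mirroring task 0's role.
    std::printf("task 0: re-initialize global structures\n");
  }

  // Barrier 2: no task resumes marking until every reset above has finished.
  second_overflow_barrier.arrive_and_wait();
}

int main() {
  std::vector<std::thread> tasks;
  for (int i = 0; i < kNumTasks; i++) {
    tasks.emplace_back(restart_after_overflow, i);
  }
  for (std::thread& t : tasks) {
    t.join();
  }
  return 0;
}

Two barriers are genuinely needed: with only one, a fast task could leave the rendezvous and start marking while task 0 is still resetting the global structures.
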


 395   // marking or when marking completes (via set_non_marking_state below).
 396   void reset_marking_for_restart();
 397 
 398   // We do this after we're done with marking so that the marking data
 399   // structures are initialized to a sensible and predictable state.
 400   void reset_at_marking_complete();
 401 
 402   // Called to indicate how many threads are currently active.
 403   void set_concurrency(uint active_tasks);
 404 
 405   // Should be called to indicate which phase we're in (concurrent
 406   // mark or remark) and how many threads are currently active.
 407   void set_concurrency_and_phase(uint active_tasks, bool concurrent);
 408 
 409   // Prints all gathered CM-related statistics
 410   void print_stats();
 411 
 412   HeapWord*               finger()           { return _finger;   }
 413   bool                    concurrent()       { return _concurrent; }
 414   uint                    active_tasks()     { return _num_active_tasks; }
 415   ParallelTaskTerminator* terminator() const { return _terminator.terminator(); }
 416 
 417   // Claims the next available region to be scanned by a marking
 418   // task/thread. It might return NULL if the next region is empty or
 419   // we have run out of regions. In the latter case, out_of_regions()
 420   // determines whether we've really run out of regions or the task
 421   // should call claim_region() again. This might seem a bit
 422   // awkward. Originally, the code was written so that claim_region()
 423   // either successfully returned with a non-empty region or there
 424   // were no more regions to be claimed. The problem with this was
 425   // that, in certain circumstances, it iterated over large chunks of
 426   // the heap finding only empty regions and, while it was working, it
 427   // was preventing the calling task from calling its regular clock
 428   // method. So, this way, each task will spend very little time in
 429   // claim_region() and is allowed to call the regular clock method
 430   // frequently.
 431   HeapRegion* claim_region(uint worker_id);
 432 
 433   // Determines whether we've run out of regions to scan. Note that
 434   // the finger can point past the heap end in case the heap was expanded
 435   // to satisfy an allocation without doing a GC. This is fine, because all
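
The claim_region() comment above amounts to a finger-based claiming scheme: tasks race to advance a shared, region-aligned finger, and each call makes at most one short claim attempt (rather than looping until it finds a non-empty region), so the calling task stays free to invoke its regular clock method between attempts. A hedged sketch of that idea, with C++11 atomics standing in for HotSpot's Atomic::cmpxchg and a flat region index standing in for the region-aligned HeapWord* finger; all names are illustrative.

#include <atomic>
#include <cstddef>

struct HeapRegionStub {
  bool is_empty;   // in the demo below all regions are non-empty
};

constexpr std::size_t kNumRegions = 1024;
HeapRegionStub regions[kNumRegions];   // zero-initialized: is_empty == false
std::atomic<std::size_t> finger{0};    // index of the next unclaimed region

// Makes exactly one claim attempt. Returns the claimed region, or nullptr when
// the claimed region was empty, the CAS lost a race with another task, or the
// finger has reached the end of the heap. The caller uses an out_of_regions()-
// style check to decide whether to call again, after running its clock method.
HeapRegionStub* claim_region_sketch() {
  std::size_t cur = finger.load(std::memory_order_relaxed);
  if (cur >= kNumRegions) {
    return nullptr;                    // really out of regions
  }
  // A single CAS advances the finger past one region, claiming it on success.
  if (finger.compare_exchange_strong(cur, cur + 1)) {
    HeapRegionStub* r = &regions[cur];
    return r->is_empty ? nullptr : r;  // empty regions are skipped, too
  }
  return nullptr;                      // lost the race; call again later
}

int main() {
  // Single-threaded demo: claims every region, then stops at the heap end.
  std::size_t claimed = 0;
  while (claim_region_sketch() != nullptr) {
    claimed++;   // a real task would scan the claimed region here
  }
  return claimed == kNumRegions ? 0 : 1;
}

Keeping each call to a single attempt is exactly the fix the comment describes: the original looping version could walk large stretches of empty regions without ever yielding to the clock method.
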

