
src/hotspot/share/gc/g1/g1ConcurrentMark.hpp

rev 49484 : imported patch 8197573-remove-secondary-free-list
rev 49488 : imported patch 8197932-better-split-work-during-rebuild-phase
rev 49496 : imported patch 8151171-renamings
rev 49497 : [mq]: 8200234-g1concurrentmark-refactorings


  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_VM_GC_G1_G1CONCURRENTMARK_HPP
  26 #define SHARE_VM_GC_G1_G1CONCURRENTMARK_HPP
  27 
  28 #include "gc/g1/g1ConcurrentMarkBitMap.hpp"
  29 #include "gc/g1/g1ConcurrentMarkObjArrayProcessor.hpp"

  30 #include "gc/g1/g1RegionMarkStatsCache.hpp"
  31 #include "gc/g1/heapRegionSet.hpp"
  32 #include "gc/shared/taskqueue.hpp"
  33 #include "memory/allocation.hpp"
  34 
  35 class ConcurrentGCTimer;
  36 class ConcurrentMarkThread;
  37 class G1CollectedHeap;
  38 class G1CMTask;
  39 class G1ConcurrentMark;
  40 class G1OldTracer;
  41 class G1RegionToSpaceMapper;
  42 class G1SurvivorRegions;
  43 
  44 #ifdef _MSC_VER
  45 #pragma warning(push)
  46 // warning C4522: multiple assignment operators specified
  47 #pragma warning(disable:4522)
  48 #endif
  49 


 348   volatile bool          _concurrent_marking_in_progress;
 349 
 350   ConcurrentGCTimer*     _gc_timer_cm;
 351 
 352   G1OldTracer*           _gc_tracer_cm;
 353 
 354   // Timing statistics. All of them are in ms
 355   NumberSeq _init_times;
 356   NumberSeq _remark_times;
 357   NumberSeq _remark_mark_times;
 358   NumberSeq _remark_weak_ref_times;
 359   NumberSeq _cleanup_times;
 360   double    _total_cleanup_time;
 361 
 362   double*   _accum_task_vtime;   // Accumulated task vtime
 363 
 364   WorkGang* _concurrent_workers;
 365   uint      _num_concurrent_workers; // The number of marking worker threads we're using
 366   uint      _max_concurrent_workers; // Maximum number of marking worker threads
 367 


 368   void finalize_marking();
 369 
 370   void weak_refs_work_parallel_part(BoolObjectClosure* is_alive, bool purged_classes);
 371   void weak_refs_work(bool clear_all_soft_refs);
 372 


 373   void swap_mark_bitmaps();
 374 
 375   void reclaim_empty_regions();
 376 




 377   // Resets the global marking data structures, as well as the
 378   // task-local ones; should be called during initial mark.
 379   void reset();
 380 
 381   // Resets all the marking data structures. Called when we have to restart
 382   // marking or when marking completes (via reset_at_marking_complete() below).
 383   void reset_marking_for_restart();
 384 
 385   // We do this after we're done with marking so that the marking data
 386   // structures are initialized to a sensible and predictable state.
 387   void reset_at_marking_complete();
 388 
 389   // Called to indicate how many threads are currently active.
 390   void set_concurrency(uint active_tasks);
 391 
 392   // Should be called to indicate which phase we're in (concurrent
 393   // mark or remark) and how many threads are currently active.
 394   void set_concurrency_and_phase(uint active_tasks, bool concurrent);
 395 
 396   // Prints all gathered CM-related statistics


 430     // we can only compare against _max_num_tasks.
 431     assert(id < _max_num_tasks, "Task id %u not within bounds up to %u", id, _max_num_tasks);
 432     return _tasks[id];
 433   }
 434 
 435   // Access / manipulation of the overflow flag which is set to
 436   // indicate that the global stack has overflowed
 437   bool has_overflown()           { return _has_overflown; }
 438   void set_has_overflown()       { _has_overflown = true; }
 439   void clear_has_overflown()     { _has_overflown = false; }
 440   bool restart_for_overflow()    { return _restart_for_overflow; }
 441 
 442   // Methods to enter the two overflow sync barriers
 443   void enter_first_sync_barrier(uint worker_id);
 444   void enter_second_sync_barrier(uint worker_id);
 445 
 446   // Clear the given bitmap in parallel using the given WorkGang. If may_yield is
 447   // true, periodically check whether this method should exit prematurely.
 448   void clear_bitmap(G1CMBitMap* bitmap, WorkGang* workers, bool may_yield);
 449 
 450   // Clear statistics gathered during the concurrent cycle for the given region after
 451   // it has been reclaimed.
 452   void clear_statistics_in_region(uint region_idx);
 453   // Region statistics gathered during marking.
 454   G1RegionMarkStats* _region_mark_stats;
 455   // Top pointer for each region at the start of the remembered set rebuild process
 456   // for regions whose remembered sets need to be rebuilt. A NULL for a given region
 457   // means that this region does not need to be scanned during the remembered set
 458   // rebuild phase at all.
 459   HeapWord* volatile* _top_at_rebuild_starts;
 460 public:
 461   void add_to_liveness(uint worker_id, oop const obj, size_t size);
 462   // Liveness of the given region as determined by concurrent marking, i.e. the number of
 463   // live words between bottom and nTAMS.
 464   size_t liveness(uint region)  { return _region_mark_stats[region]._live_words; }
 465 
 466   // Sets the internal top_at_rebuild_start for the given region to the current top of the region.
 467   inline void update_top_at_rebuild_start(HeapRegion* r);
 468   // TARS for the given region during remembered set rebuilding.
 469   inline HeapWord* top_at_rebuild_start(uint region) const;
 470 



 471   // Notification for eagerly reclaimed regions to clean up.
 472   void humongous_object_eagerly_reclaimed(HeapRegion* r);
 473   // Manipulation of the global mark stack.
 474   // The push and pop operations are used by tasks for transfers
 475   // between task-local queues and the global mark stack.
 476   bool mark_stack_push(G1TaskQueueEntry* arr) {
 477     if (!_global_mark_stack.par_push_chunk(arr)) {
 478       set_has_overflown();
 479       return false;
 480     }
 481     return true;
 482   }
 483   bool mark_stack_pop(G1TaskQueueEntry* arr) {
 484     return _global_mark_stack.par_pop_chunk(arr);
 485   }
 486   size_t mark_stack_size() const                { return _global_mark_stack.size(); }
 487   size_t partial_mark_stack_size_target() const { return _global_mark_stack.capacity() / 3; }
 488   bool mark_stack_empty() const                 { return _global_mark_stack.is_empty(); }
 489 
 490   G1CMRootRegions* root_regions() { return &_root_regions; }




  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_VM_GC_G1_G1CONCURRENTMARK_HPP
  26 #define SHARE_VM_GC_G1_G1CONCURRENTMARK_HPP
  27 
  28 #include "gc/g1/g1ConcurrentMarkBitMap.hpp"
  29 #include "gc/g1/g1ConcurrentMarkObjArrayProcessor.hpp"
  30 #include "gc/g1/g1HeapVerifier.hpp"
  31 #include "gc/g1/g1RegionMarkStatsCache.hpp"
  32 #include "gc/g1/heapRegionSet.hpp"
  33 #include "gc/shared/taskqueue.hpp"
  34 #include "memory/allocation.hpp"
  35 
  36 class ConcurrentGCTimer;
  37 class ConcurrentMarkThread;
  38 class G1CollectedHeap;
  39 class G1CMTask;
  40 class G1ConcurrentMark;
  41 class G1OldTracer;
  42 class G1RegionToSpaceMapper;
  43 class G1SurvivorRegions;
  44 
  45 #ifdef _MSC_VER
  46 #pragma warning(push)
  47 // warning C4522: multiple assignment operators specified
  48 #pragma warning(disable:4522)
  49 #endif
  50 


 349   volatile bool          _concurrent_marking_in_progress;
 350 
 351   ConcurrentGCTimer*     _gc_timer_cm;
 352 
 353   G1OldTracer*           _gc_tracer_cm;
 354 
 355   // Timing statistics. All of them are in ms
 356   NumberSeq _init_times;
 357   NumberSeq _remark_times;
 358   NumberSeq _remark_mark_times;
 359   NumberSeq _remark_weak_ref_times;
 360   NumberSeq _cleanup_times;
 361   double    _total_cleanup_time;
 362 
 363   double*   _accum_task_vtime;   // Accumulated task vtime
 364 
 365   WorkGang* _concurrent_workers;
 366   uint      _num_concurrent_workers; // The number of marking worker threads we're using
 367   uint      _max_concurrent_workers; // Maximum number of marking worker threads
 368 
 369   void verify_during_pause(G1HeapVerifier::G1VerifyType type, VerifyOption vo, const char* caller);
 370 
 371   void finalize_marking();
 372 
 373   void weak_refs_work_parallel_part(BoolObjectClosure* is_alive, bool purged_classes);
 374   void weak_refs_work(bool clear_all_soft_refs);
 375 
 376   void report_object_count();
 377 
 378   void swap_mark_bitmaps();
 379 
 380   void reclaim_empty_regions();
 381 
 382   // Clear statistics gathered during the concurrent cycle for the given region after
 383   // it has been reclaimed.
 384   void clear_statistics(HeapRegion* r);
 385 
 386   // Resets the global marking data structures, as well as the
 387   // task-local ones; should be called during initial mark.
 388   void reset();
 389 
 390   // Resets all the marking data structures. Called when we have to restart
 391   // marking or when marking completes (via reset_at_marking_complete() below).
 392   void reset_marking_for_restart();
 393 
 394   // We do this after we're done with marking so that the marking data
 395   // structures are initialized to a sensible and predictable state.
 396   void reset_at_marking_complete();
 397 
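The three reset entry points above differ mainly in when they run: a full reset at initial mark, a reset that keeps the cycle going after a restart, and a reset into a quiescent state once marking is done. A minimal sketch of that lifecycle, where reset_common() and _in_progress are illustrative stand-ins rather than actual G1ConcurrentMark members:

struct MarkingLifecycle {
  bool _in_progress = false;

  // Stand-in for the shared work: empty task queues and the global mark
  // stack, clear the overflow flag, zero per-region statistics.
  void reset_common() {}

  void reset() {                        // initial mark: start a new cycle
    reset_common();
    _in_progress = true;
  }
  void reset_marking_for_restart() {    // restart: redo the marking work
    reset_common();
  }
  void reset_at_marking_complete() {    // marking done: predictable idle state
    reset_common();
    _in_progress = false;
  }
};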
 398   // Called to indicate how many threads are currently active.
 399   void set_concurrency(uint active_tasks);
 400 
 401   // Should be called to indicate which phase we're in (concurrent
 402   // mark or remark) and how many threads are currently active.
 403   void set_concurrency_and_phase(uint active_tasks, bool concurrent);
 404 
 405   // Prints all gathered CM-related statistics


 439     // we can only compare against _max_num_tasks.
 440     assert(id < _max_num_tasks, "Task id %u not within bounds up to %u", id, _max_num_tasks);
 441     return _tasks[id];
 442   }
 443 
 444   // Access / manipulation of the overflow flag which is set to
 445   // indicate that the global stack has overflowed
 446   bool has_overflown()           { return _has_overflown; }
 447   void set_has_overflown()       { _has_overflown = true; }
 448   void clear_has_overflown()     { _has_overflown = false; }
 449   bool restart_for_overflow()    { return _restart_for_overflow; }
 450 
 451   // Methods to enter the two overflow sync barriers
 452   void enter_first_sync_barrier(uint worker_id);
 453   void enter_second_sync_barrier(uint worker_id);
 454 
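The overflow flag and the two sync barriers above work together: once a push onto the global stack fails, every task sees the flag, aborts its marking step, and meets the others at the first barrier; a single thread then resets the marking state, and the second barrier holds everyone until that reset has finished. A self-contained sketch of this rendezvous pattern in standard C++ (the type and member names are illustrative, not HotSpot code):

#include <atomic>
#include <barrier>   // C++20
#include <cstdio>

struct OverflowSync {
  std::atomic<bool> has_overflown{false};
  std::barrier<> first_barrier;
  std::barrier<> second_barrier;

  explicit OverflowSync(int workers)
    : first_barrier(workers), second_barrier(workers) {}

  void handle_overflow(int worker_id) {
    has_overflown.store(true, std::memory_order_release);
    // Every worker eventually aborts its marking step and arrives here.
    first_barrier.arrive_and_wait();
    if (worker_id == 0) {
      // Exactly one thread resets the global marking state for the restart.
      std::printf("worker 0: resetting marking state\n");
    }
    // Nobody resumes until the reset between the barriers is complete.
    second_barrier.arrive_and_wait();
  }
};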
 455   // Clear the given bitmap in parallel using the given WorkGang. If may_yield is
 456   // true, periodically check whether this method should exit prematurely.
 457   void clear_bitmap(G1CMBitMap* bitmap, WorkGang* workers, bool may_yield);
 458 
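A self-contained illustration of the clear-with-yield idea behind clear_bitmap(): clear in fixed-size chunks and poll an abort condition between chunks. The chunk size, SimpleBitMap and should_yield() below are assumptions for illustration, not the values or types G1 uses:

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <vector>

struct SimpleBitMap {
  std::vector<uint64_t> words;
  void clear_range(size_t begin, size_t end) {   // [begin, end), in words
    std::fill(words.begin() + begin, words.begin() + end, 0);
  }
};

bool should_yield() { return false; }            // stand-in for a safepoint poll

// Returns false if clearing was cut short by a yield request.
bool clear_bitmap(SimpleBitMap& bm, bool may_yield) {
  const size_t chunk = 32 * 1024;                // words cleared per step
  for (size_t cur = 0; cur < bm.words.size(); cur += chunk) {
    bm.clear_range(cur, std::min(cur + chunk, bm.words.size()));
    if (may_yield && should_yield()) {
      return false;                              // exit prematurely
    }
  }
  return true;
}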



 459   // Region statistics gathered during marking.
 460   G1RegionMarkStats* _region_mark_stats;
 461   // Top pointer for each region at the start of the remembered set rebuild process
 462   // for regions whose remembered sets need to be rebuilt. A NULL for a given region
 463   // means that this region does not need to be scanned during the remembered set
 464   // rebuild phase at all.
 465   HeapWord* volatile* _top_at_rebuild_starts;
 466 public:
 467   void add_to_liveness(uint worker_id, oop const obj, size_t size);
 468   // Liveness of the given region as determined by concurrent marking, i.e. the number of
 469   // live words between bottom and nTAMS.
 470   size_t liveness(uint region)  { return _region_mark_stats[region]._live_words; }
 471 
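add_to_liveness() and liveness() amount to a per-region table of live word counts updated concurrently by many workers (presumably batched through the G1RegionMarkStatsCache included above). A self-contained sketch of that accounting with plain atomics; the direct fetch_add is an illustrative simplification:

#include <atomic>
#include <cstddef>
#include <vector>

struct RegionMarkStats { std::atomic<size_t> live_words{0}; };

struct LivenessTable {
  std::vector<RegionMarkStats> stats;            // one entry per region
  explicit LivenessTable(size_t num_regions) : stats(num_regions) {}

  // Called concurrently by marking workers for each newly marked object.
  void add_to_liveness(size_t region, size_t word_size) {
    stats[region].live_words.fetch_add(word_size, std::memory_order_relaxed);
  }
  size_t liveness(size_t region) const {
    return stats[region].live_words.load(std::memory_order_relaxed);
  }
};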
 472   // Sets the internal top_at_rebuild_start for the given region to the current top of the region.
 473   inline void update_top_at_rebuild_start(HeapRegion* r);
 474   // TARS for the given region during remembered set rebuilding.
 475   inline HeapWord* top_at_rebuild_start(uint region) const;
 476 
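The TARS bookkeeping boils down to: when rebuilding starts, record the current top for each region whose remembered set needs rebuilding, and NULL for every other region so the rebuild phase skips it entirely. An illustrative sketch under those assumptions (the types and the needs_remset_rebuild test are stand-ins, not the HotSpot implementation):

typedef void* HeapWordPtr;                 // stand-in for HotSpot's HeapWord*

struct Region {
  unsigned    index;
  HeapWordPtr top;                         // current allocation top
  bool        needs_remset_rebuild;        // decided during marking
};

struct TarsTable {
  HeapWordPtr volatile* tars;              // one slot per region

  void update_top_at_rebuild_start(const Region& r) {
    // NULL means: do not scan this region while rebuilding remembered sets.
    tars[r.index] = r.needs_remset_rebuild ? r.top : nullptr;
  }
  HeapWordPtr top_at_rebuild_start(unsigned region) const {
    return tars[region];
  }
};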
 477   // Clear statistics gathered during the concurrent cycle for the given region after
 478   // it has been reclaimed.
 479   void clear_statistics_in_region(uint region_idx);
 480   // Notification for eagerly reclaimed regions to clean up.
 481   void humongous_object_eagerly_reclaimed(HeapRegion* r);
 482   // Manipulation of the global mark stack.
 483   // The push and pop operations are used by tasks for transfers
 484   // between task-local queues and the global mark stack.
 485   bool mark_stack_push(G1TaskQueueEntry* arr) {
 486     if (!_global_mark_stack.par_push_chunk(arr)) {
 487       set_has_overflown();
 488       return false;
 489     }
 490     return true;
 491   }
 492   bool mark_stack_pop(G1TaskQueueEntry* arr) {
 493     return _global_mark_stack.par_pop_chunk(arr);
 494   }
 495   size_t mark_stack_size() const                { return _global_mark_stack.size(); }
 496   size_t partial_mark_stack_size_target() const { return _global_mark_stack.capacity() / 3; }
 497   bool mark_stack_empty() const                 { return _global_mark_stack.is_empty(); }
 498 
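To make the chunk-transfer contract above concrete, here is a toy, mutex-guarded chunked stack with the same push/pop shape; the real global mark stack is a fixed-capacity concurrent structure, which is why a failed push signals overflow rather than allocating more space. ChunkSize, Entry and the capacity are illustrative:

#include <algorithm>
#include <cstddef>
#include <mutex>
#include <vector>

constexpr size_t ChunkSize = 64;                    // entries per transfer
struct Entry { void* ref = nullptr; };

struct ToyMarkStack {
  std::vector<std::vector<Entry>> chunks;
  const size_t capacity_chunks = 1024;              // illustrative limit
  std::mutex lock;

  bool par_push_chunk(const Entry* buf) {           // false signals overflow
    std::lock_guard<std::mutex> g(lock);
    if (chunks.size() == capacity_chunks) return false;
    chunks.emplace_back(buf, buf + ChunkSize);
    return true;
  }
  bool par_pop_chunk(Entry* buf) {                  // false when empty
    std::lock_guard<std::mutex> g(lock);
    if (chunks.empty()) return false;
    std::copy(chunks.back().begin(), chunks.back().end(), buf);
    chunks.pop_back();
    return true;
  }
  size_t size() const { return chunks.size(); }     // approximate under races
};

Read this way, partial_mark_stack_size_target() (a third of capacity) looks like a drain threshold: when draining the global stack only partially, tasks presumably stop pulling chunks back once its size has dropped to that target.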
 499   G1CMRootRegions* root_regions() { return &_root_regions; }

