< prev index next >

src/share/vm/gc/g1/g1ConcurrentMark.hpp

Print this page




 335   WorkGangBarrierSync     _second_overflow_barrier_sync;
 336 
 337   // This is set by any task, when an overflow on the global data
 338   // structures is detected
 339   volatile bool           _has_overflown;
 340   // True: marking is concurrent, false: we're in remark
 341   volatile bool           _concurrent;
 342   // Set at the end of a Full GC so that marking aborts
 343   volatile bool           _has_aborted;
 344 
 345   // Used when remark aborts due to an overflow to indicate that
 346   // another concurrent marking phase should start
 347   volatile bool           _restart_for_overflow;
 348 
 349   // This is true from the very start of concurrent marking until the
 350   // point when all the tasks complete their work. It is really used
 351   // to determine the points between the end of concurrent marking and
 352   // time of remark.
 353   volatile bool           _concurrent_marking_in_progress;
 354 
 355   // Keep track of whether we have started concurrent phase or not.
 356   bool                    _concurrent_phase_started;









 357 
 358   // All of these times are in ms
 359   NumberSeq _init_times;
 360   NumberSeq _remark_times;
 361   NumberSeq _remark_mark_times;
 362   NumberSeq _remark_weak_ref_times;
 363   NumberSeq _cleanup_times;
 364   double    _total_counting_time;
 365   double    _total_rs_scrub_time;
 366 
 367   double*   _accum_task_vtime;   // Accumulated task vtime
 368 
 369   WorkGang* _parallel_workers;
 370 
 371   void weakRefsWorkParallelPart(BoolObjectClosure* is_alive, bool purged_classes);
 372   void weakRefsWork(bool clear_all_soft_refs);
 373 
 374   void swapMarkBitMaps();
 375 
 376   // It resets the global marking data structures, as well as the


 468   // are then used to populate the _region_bm, _card_bm, and
 469   // the total live bytes, which are then subsequently updated
 470   // during cleanup.
 471 
 472   // An array of bitmaps (one bit map per task). Each bitmap
 473   // is used to record the cards spanned by the live objects
 474   // marked by that task/worker.
 475   BitMap*  _count_card_bitmaps;
 476 
 477   // Used to record the number of marked live bytes
 478   // (for each region, by worker thread).
 479   size_t** _count_marked_bytes;
 480 
 481   // Card index of the bottom of the G1 heap. Used for biasing indices into
 482   // the card bitmaps.
 483   intptr_t _heap_bottom_card_num;
 484 
 485   // Set to true when initialization is complete
 486   bool _completed_initialization;
 487 



 488 public:
 489   // Manipulation of the global mark stack.
 490   // The push and pop operations are used by tasks for transfers
 491   // between task-local queues and the global mark stack, and use
 492   // locking for concurrency safety.
 493   bool mark_stack_push(oop* arr, int n) {
 494     _markStack.par_push_arr(arr, n);
 495     if (_markStack.overflow()) {     // global stack is full: record the overflow so marking can restart
 496       set_has_overflown();
 497       return false;                  // caller's oops were not (all) transferred
 498     }
 499     return true;
 500   }
 501   void mark_stack_pop(oop* arr, int max, int* n) {
 502     _markStack.par_pop_arr(arr, max, n);   // NOTE(review): presumably *n receives the count actually popped (<= max) — confirm in par_pop_arr
 503   }
 504   size_t mark_stack_size()                { return _markStack.size(); }
 505   size_t partial_mark_stack_size_target() { return _markStack.maxElems()/3; }   // drain threshold: one third of capacity
 506   bool mark_stack_overflow()              { return _markStack.overflow(); }
 507   bool mark_stack_empty()                 { return _markStack.isEmpty(); }
 508 
 509   G1CMRootRegions* root_regions() { return &_root_regions; }   // accessor for the root-region set (no ownership transfer)
 510 
 511   bool concurrent_marking_in_progress() {
 512     return _concurrent_marking_in_progress;
 513   }
 514   void set_concurrent_marking_in_progress() {
 515     _concurrent_marking_in_progress = true;
 516   }
 517   void clear_concurrent_marking_in_progress() {
 518     _concurrent_marking_in_progress = false;   // flag is volatile; no stronger ordering is applied here
 519   }
 520 
 521   void register_concurrent_phase_start(const char* title);
 522   void register_concurrent_phase_end();


 523 
 524   void update_accum_task_vtime(int i, double vtime) {
 525     _accum_task_vtime[i] += vtime;   // add this slice of vtime to worker i's running total (no bounds check on i)
 526   }
 527 
 528   double all_task_accum_vtime() {
 529     double ret = 0.0;
 530     for (uint i = 0; i < _max_worker_id; ++i)   // sum over all worker slots, not just active workers
 531       ret += _accum_task_vtime[i];
 532     return ret;
 533   }
 534 
 535   // Attempts to steal an object from the task queues of other tasks
 536   bool try_stealing(uint worker_id, int* hash_seed, oop& obj);
 537 
 538   G1ConcurrentMark(G1CollectedHeap* g1h,
 539                    G1RegionToSpaceMapper* prev_bitmap_storage,
 540                    G1RegionToSpaceMapper* next_bitmap_storage);
 541   ~G1ConcurrentMark();
 542 




 335   WorkGangBarrierSync     _second_overflow_barrier_sync;
 336 
 337   // This is set by any task, when an overflow on the global data
 338   // structures is detected
 339   volatile bool           _has_overflown;
 340   // True: marking is concurrent, false: we're in remark
 341   volatile bool           _concurrent;
 342   // Set at the end of a Full GC so that marking aborts
 343   volatile bool           _has_aborted;
 344 
 345   // Used when remark aborts due to an overflow to indicate that
 346   // another concurrent marking phase should start
 347   volatile bool           _restart_for_overflow;
 348 
 349   // This is true from the very start of concurrent marking until the
 350   // point when all the tasks complete their work. It is really used
 351   // to determine the points between the end of concurrent marking and
 352   // time of remark.
 353   volatile bool           _concurrent_marking_in_progress;
 354 
 355   // There would be a race between ConcurrentMarkThread and VMThread(ConcurrentMark::abort())
 356   // to call ConcurrentGCTimer::register_gc_concurrent_end().
 357   // And this variable is used to keep track of concurrent phase.
 358   volatile uint           _concurrent_phase_status;
 359   // Concurrent phase is not yet started.
 360   static const uint       ConcPhaseNotStarted = 0;
 361   // Concurrent phase is started.
 362   static const uint       ConcPhaseStarted = 1;
 363   // Caller thread of ConcurrentGCTimer::register_gc_concurrent_end() is ending concurrent phase.
 364   // So other thread should wait until the status to be changed to ConcPhaseNotStarted.
 365   static const uint       ConcPhaseStopping = 2;
 366 
 367   // All of these times are in ms
 368   NumberSeq _init_times;
 369   NumberSeq _remark_times;
 370   NumberSeq _remark_mark_times;
 371   NumberSeq _remark_weak_ref_times;
 372   NumberSeq _cleanup_times;
 373   double    _total_counting_time;
 374   double    _total_rs_scrub_time;
 375 
 376   double*   _accum_task_vtime;   // Accumulated task vtime
 377 
 378   WorkGang* _parallel_workers;
 379 
 380   void weakRefsWorkParallelPart(BoolObjectClosure* is_alive, bool purged_classes);
 381   void weakRefsWork(bool clear_all_soft_refs);
 382 
 383   void swapMarkBitMaps();
 384 
 385   // It resets the global marking data structures, as well as the


 477   // are then used to populate the _region_bm, _card_bm, and
 478   // the total live bytes, which are then subsequently updated
 479   // during cleanup.
 480 
 481   // An array of bitmaps (one bit map per task). Each bitmap
 482   // is used to record the cards spanned by the live objects
 483   // marked by that task/worker.
 484   BitMap*  _count_card_bitmaps;
 485 
 486   // Used to record the number of marked live bytes
 487   // (for each region, by worker thread).
 488   size_t** _count_marked_bytes;
 489 
 490   // Card index of the bottom of the G1 heap. Used for biasing indices into
 491   // the card bitmaps.
 492   intptr_t _heap_bottom_card_num;
 493 
 494   // Set to true when initialization is complete
 495   bool _completed_initialization;
 496 
 497   // end_timer, true to end gc timer after ending concurrent phase.
 498   void register_concurrent_phase_end_common(bool end_timer);
 499 
 500 public:
 501   // Manipulation of the global mark stack.
 502   // The push and pop operations are used by tasks for transfers
 503   // between task-local queues and the global mark stack, and use
 504   // locking for concurrency safety.
 505   bool mark_stack_push(oop* arr, int n) {
 506     _markStack.par_push_arr(arr, n);
 507     if (_markStack.overflow()) {     // global stack is full: record the overflow so marking can restart
 508       set_has_overflown();
 509       return false;                  // caller's oops were not (all) transferred
 510     }
 511     return true;
 512   }
 513   void mark_stack_pop(oop* arr, int max, int* n) {
 514     _markStack.par_pop_arr(arr, max, n);   // NOTE(review): presumably *n receives the count actually popped (<= max) — confirm in par_pop_arr
 515   }
 516   size_t mark_stack_size()                { return _markStack.size(); }
 517   size_t partial_mark_stack_size_target() { return _markStack.maxElems()/3; }   // drain threshold: one third of capacity
 518   bool mark_stack_overflow()              { return _markStack.overflow(); }
 519   bool mark_stack_empty()                 { return _markStack.isEmpty(); }
 520 
 521   G1CMRootRegions* root_regions() { return &_root_regions; }   // accessor for the root-region set (no ownership transfer)
 522 
 523   bool concurrent_marking_in_progress() {
 524     return _concurrent_marking_in_progress;
 525   }
 526   void set_concurrent_marking_in_progress() {
 527     _concurrent_marking_in_progress = true;
 528   }
 529   void clear_concurrent_marking_in_progress() {
 530     _concurrent_marking_in_progress = false;   // flag is volatile; no stronger ordering is applied here
 531   }
 532 
 533   void register_concurrent_phase_start(const char* title);
 534   void register_concurrent_phase_end();
 535   // Ends both concurrent phase and timer.
 536   void register_concurrent_gc_end();
 537 
 538   void update_accum_task_vtime(int i, double vtime) {
 539     _accum_task_vtime[i] += vtime;   // add this slice of vtime to worker i's running total (no bounds check on i)
 540   }
 541 
 542   double all_task_accum_vtime() {
 543     double ret = 0.0;
 544     for (uint i = 0; i < _max_worker_id; ++i)   // sum over all worker slots, not just active workers
 545       ret += _accum_task_vtime[i];
 546     return ret;
 547   }
 548 
 549   // Attempts to steal an object from the task queues of other tasks
 550   bool try_stealing(uint worker_id, int* hash_seed, oop& obj);
 551 
 552   G1ConcurrentMark(G1CollectedHeap* g1h,
 553                    G1RegionToSpaceMapper* prev_bitmap_storage,
 554                    G1RegionToSpaceMapper* next_bitmap_storage);
 555   ~G1ConcurrentMark();
 556 


< prev index next >