< prev index next >

src/share/vm/gc/g1/concurrentMark.hpp

Print this page
rev 9733 : [mq]: webrev.00
rev 9734 : [mq]: webrev.01


 336   WorkGangBarrierSync     _second_overflow_barrier_sync;
 337 
 338   // This is set by any task, when an overflow on the global data
 339   // structures is detected
 340   volatile bool           _has_overflown;
 341   // True: marking is concurrent, false: we're in remark
 342   volatile bool           _concurrent;
 343   // Set at the end of a Full GC so that marking aborts
 344   volatile bool           _has_aborted;
 345 
 346   // Used when remark aborts due to an overflow to indicate that
 347   // another concurrent marking phase should start
 348   volatile bool           _restart_for_overflow;
 349 
 350   // This is true from the very start of concurrent marking until the
 351   // point when all the tasks complete their work. It is really used
 352   // to determine the points between the end of concurrent marking and
 353   // time of remark.
 354   volatile bool           _concurrent_marking_in_progress;
 355 



 356   // All of these times are in ms
 357   NumberSeq _init_times;
 358   NumberSeq _remark_times;
 359   NumberSeq _remark_mark_times;
 360   NumberSeq _remark_weak_ref_times;
 361   NumberSeq _cleanup_times;
 362   double    _total_counting_time;
 363   double    _total_rs_scrub_time;
 364 
 365   double*   _accum_task_vtime;   // Accumulated task vtime
 366 
 367   WorkGang* _parallel_workers;
 368 
 369   void weakRefsWorkParallelPart(BoolObjectClosure* is_alive, bool purged_classes);
 370   void weakRefsWork(bool clear_all_soft_refs);
 371 
 372   void swapMarkBitMaps();
 373 
 374   // It resets the global marking data structures, as well as the
 375   // task local ones; should be called during initial mark.


 498   }
  // Pops up to "max" entries from the global mark stack into "arr",
  // storing the number actually popped in *n.  Delegates to the
  // parallel pop so multiple marking tasks may drain concurrently.
  void mark_stack_pop(oop* arr, int max, int* n) {
    _markStack.par_pop_arr(arr, max, n);
  }
  // Current number of entries on the global mark stack.
  size_t mark_stack_size()                { return _markStack.size(); }
  // One third of the stack's capacity; used as the target size when
  // tasks partially drain or fill the global mark stack.
  size_t partial_mark_stack_size_target() { return _markStack.maxElems()/3; }
  // True if a push to the global mark stack failed for lack of space.
  bool mark_stack_overflow()              { return _markStack.overflow(); }
  // True if the global mark stack holds no entries.
  bool mark_stack_empty()                 { return _markStack.isEmpty(); }
 506 
 507   CMRootRegions* root_regions() { return &_root_regions; }
 508 
  // True from the very start of concurrent marking until all marking
  // tasks have completed their work (see _concurrent_marking_in_progress).
  bool concurrent_marking_in_progress() {
    return _concurrent_marking_in_progress;
  }
  // Raises the in-progress flag at the start of concurrent marking.
  void set_concurrent_marking_in_progress() {
    _concurrent_marking_in_progress = true;
  }
  // Clears the in-progress flag once all marking tasks complete.
  void clear_concurrent_marking_in_progress() {
    _concurrent_marking_in_progress = false;
  }



 518 
  // Adds "vtime" to worker i's accumulated task vtime
  // (_accum_task_vtime).  No bounds check is performed on i.
  void update_accum_task_vtime(int i, double vtime) {
    _accum_task_vtime[i] += vtime;
  }
 522 
 523   double all_task_accum_vtime() {
 524     double ret = 0.0;
 525     for (uint i = 0; i < _max_worker_id; ++i)
 526       ret += _accum_task_vtime[i];
 527     return ret;
 528   }
 529 
 530   // Attempts to steal an object from the task queues of other tasks
 531   bool try_stealing(uint worker_id, int* hash_seed, oop& obj);
 532 
  // Constructs the concurrent marking subsystem for heap "g1h"; the two
  // mappers provide the backing storage for the previous and next
  // marking bitmaps.
  ConcurrentMark(G1CollectedHeap* g1h,
                 G1RegionToSpaceMapper* prev_bitmap_storage,
                 G1RegionToSpaceMapper* next_bitmap_storage);
  ~ConcurrentMark();
 537 




 336   WorkGangBarrierSync     _second_overflow_barrier_sync;
 337 
 338   // This is set by any task, when an overflow on the global data
 339   // structures is detected
 340   volatile bool           _has_overflown;
 341   // True: marking is concurrent, false: we're in remark
 342   volatile bool           _concurrent;
 343   // Set at the end of a Full GC so that marking aborts
 344   volatile bool           _has_aborted;
 345 
 346   // Used when remark aborts due to an overflow to indicate that
 347   // another concurrent marking phase should start
 348   volatile bool           _restart_for_overflow;
 349 
 350   // This is true from the very start of concurrent marking until the
 351   // point when all the tasks complete their work. It is really used
 352   // to determine the points between the end of concurrent marking and
 353   // time of remark.
 354   volatile bool           _concurrent_marking_in_progress;
 355 
 356   // Keep track of whether we have started concurrent phase or not.
 357   volatile bool           _concurrent_phase_started;
 358 
 359   // All of these times are in ms
 360   NumberSeq _init_times;
 361   NumberSeq _remark_times;
 362   NumberSeq _remark_mark_times;
 363   NumberSeq _remark_weak_ref_times;
 364   NumberSeq _cleanup_times;
 365   double    _total_counting_time;
 366   double    _total_rs_scrub_time;
 367 
 368   double*   _accum_task_vtime;   // Accumulated task vtime
 369 
 370   WorkGang* _parallel_workers;
 371 
 372   void weakRefsWorkParallelPart(BoolObjectClosure* is_alive, bool purged_classes);
 373   void weakRefsWork(bool clear_all_soft_refs);
 374 
 375   void swapMarkBitMaps();
 376 
 377   // It resets the global marking data structures, as well as the
 378   // task local ones; should be called during initial mark.


 501   }
  // Pops up to "max" entries from the global mark stack into "arr",
  // storing the number actually popped in *n.  Delegates to the
  // parallel pop so multiple marking tasks may drain concurrently.
  void mark_stack_pop(oop* arr, int max, int* n) {
    _markStack.par_pop_arr(arr, max, n);
  }
  // Current number of entries on the global mark stack.
  size_t mark_stack_size()                { return _markStack.size(); }
  // One third of the stack's capacity; used as the target size when
  // tasks partially drain or fill the global mark stack.
  size_t partial_mark_stack_size_target() { return _markStack.maxElems()/3; }
  // True if a push to the global mark stack failed for lack of space.
  bool mark_stack_overflow()              { return _markStack.overflow(); }
  // True if the global mark stack holds no entries.
  bool mark_stack_empty()                 { return _markStack.isEmpty(); }
 509 
 510   CMRootRegions* root_regions() { return &_root_regions; }
 511 
  // True from the very start of concurrent marking until all marking
  // tasks have completed their work (see _concurrent_marking_in_progress).
  bool concurrent_marking_in_progress() {
    return _concurrent_marking_in_progress;
  }
  // Raises the in-progress flag at the start of concurrent marking.
  void set_concurrent_marking_in_progress() {
    _concurrent_marking_in_progress = true;
  }
  // Clears the in-progress flag once all marking tasks complete.
  void clear_concurrent_marking_in_progress() {
    _concurrent_marking_in_progress = false;
  }
 521 
  // Record the start of a concurrent phase named "title", and its matching
  // end; tracked via _concurrent_phase_started.
  // NOTE(review): presumably also feeds phase timing/logging — confirm
  // against the definitions in the .cpp file.
  void register_concurrent_phase_start(const char* title);
  void register_concurrent_phase_end();
 524 
  // Adds "vtime" to worker i's accumulated task vtime
  // (_accum_task_vtime).  No bounds check is performed on i.
  void update_accum_task_vtime(int i, double vtime) {
    _accum_task_vtime[i] += vtime;
  }
 528 
 529   double all_task_accum_vtime() {
 530     double ret = 0.0;
 531     for (uint i = 0; i < _max_worker_id; ++i)
 532       ret += _accum_task_vtime[i];
 533     return ret;
 534   }
 535 
 536   // Attempts to steal an object from the task queues of other tasks
 537   bool try_stealing(uint worker_id, int* hash_seed, oop& obj);
 538 
  // Constructs the concurrent marking subsystem for heap "g1h"; the two
  // mappers provide the backing storage for the previous and next
  // marking bitmaps.
  ConcurrentMark(G1CollectedHeap* g1h,
                 G1RegionToSpaceMapper* prev_bitmap_storage,
                 G1RegionToSpaceMapper* next_bitmap_storage);
  ~ConcurrentMark();
 543 


< prev index next >