< prev index next >

src/share/vm/gc/g1/concurrentMark.hpp

Print this page
rev 9733 : [mq]: webrev.00
rev 9734 : [mq]: webrev.01


 336   WorkGangBarrierSync     _second_overflow_barrier_sync;
 337 
  // NOTE(review): the flags below are plain 'volatile', not atomics; volatile
  // alone is not a thread-synchronization primitive — presumably the code
  // relies on HotSpot's explicit memory barriers elsewhere. Confirm before
  // depending on ordering guarantees here.
 338   // This is set by any task, when an overflow on the global data
 339   // structures is detected
 340   volatile bool           _has_overflown;
 341   // True: marking is concurrent, false: we're in remark
 342   volatile bool           _concurrent;
 343   // Set at the end of a Full GC so that marking aborts
 344   volatile bool           _has_aborted;
 345 
 346   // Used when remark aborts due to an overflow to indicate that
 347   // another concurrent marking phase should start
 348   volatile bool           _restart_for_overflow;
 349 
 350   // This is true from the very start of concurrent marking until the
 351   // point when all the tasks complete their work. It is really used
 352   // to determine the points between the end of concurrent marking and
 353   // time of remark.
 354   volatile bool           _concurrent_marking_in_progress;
 355 
 356   // True only inside of markFromRoots().
 357   // Similar to _concurrent_marking_in_progress but this is set to false
 358   // when CMConcurrentMarkingTask is finished.
 359   volatile bool           _concurrent_marking_from_roots;
 360 
 361   // All of these times are in ms
 362   NumberSeq _init_times;
 363   NumberSeq _remark_times;
 364   NumberSeq _remark_mark_times;
 365   NumberSeq _remark_weak_ref_times;
 366   NumberSeq _cleanup_times;
 367   double    _total_counting_time;
 368   double    _total_rs_scrub_time;
 369 
 370   double*   _accum_task_vtime;   // Accumulated task vtime
  // NOTE(review): the units of _accum_task_vtime are not stated here (the
  // "in ms" comment above covers the NumberSeq group only) — confirm at the
  // call sites of update_accum_task_vtime().
 371 
  // Worker gang used for the parallel marking phases — presumably; confirm
  // where it is allocated and sized.
 372   WorkGang* _parallel_workers;
 373 
  // Weak-reference processing performed during remark (declarations only;
  // definitions are elsewhere in the project).
 374   void weakRefsWorkParallelPart(BoolObjectClosure* is_alive, bool purged_classes);
 375   void weakRefsWork(bool clear_all_soft_refs);
 376 
  // Exchanges the roles of the prev and next marking bitmaps — presumably at
  // the end of a marking cycle; confirm in the definition.
 377   void swapMarkBitMaps();
 378 
 378 
 379   // It resets the global marking data structures, as well as the


  // Returns the task queue with the given id. The id must name one of the
  // currently active tasks (asserted); the cast narrows from the generic
  // queue type stored in the set — presumably; confirm the queue() signature.
 447   CMTaskQueue* task_queue(int id) {
 448     assert(0 <= id && id < (int) _active_tasks,
 449            "task queue id not within active bounds");
 450     return (CMTaskQueue*) _task_queues->queue(id);
 451   }
 452 
 453   // Returns the task queue set
 454   CMTaskQueueSet* task_queues()  { return _task_queues; }
 455 
 456   // Access / manipulation of the overflow flag which is set to
 457   // indicate that the global stack has overflown
 458   bool has_overflown()           { return _has_overflown; }
 459   void set_has_overflown()       { _has_overflown = true; }
 460   void clear_has_overflown()     { _has_overflown = false; }
  // True when remark decided that another concurrent marking cycle is needed.
 461   bool restart_for_overflow()    { return _restart_for_overflow; }
 462 
 463   // Methods to enter the two overflow sync barriers
 464   void enter_first_sync_barrier(uint worker_id);
 465   void enter_second_sync_barrier(uint worker_id);
 466 
 467   // Start measuring concurrent mark from ConcurrentMark::markFromRoots().
 468   void register_mark_from_roots_phase_start();
 469 
 470   // End measuring concurrent mark from ConcurrentMark::markFromRoots().
 471   void register_mark_from_roots_phase_end();
 472 
 473   // Live Data Counting data structures...
 474   // These data structures are initialized at the start of
 475   // marking. They are written to while marking is active.
 476   // They are aggregated during remark; the aggregated values
 477   // are then used to populate the _region_bm, _card_bm, and
 478   // the total live bytes, which are then subsequently updated
 479   // during cleanup.
 480 
 481   // An array of bitmaps (one bit map per task). Each bitmap
 482   // is used to record the cards spanned by the live objects
 483   // marked by that task/worker.
 484   BitMap*  _count_card_bitmaps;
 485 
 486   // Used to record the number of marked live bytes
 487   // (for each region, by worker thread).
 488   size_t** _count_marked_bytes;
  // NOTE(review): the index order of the two dimensions (worker-major vs
  // region-major) is not evident from this declaration — confirm at use sites.
 489 
 490   // Card index of the bottom of the G1 heap. Used for biasing indices into
 491   // the card bitmaps.
 492   intptr_t _heap_bottom_card_num;


  // Pops up to 'max' entries from the global mark stack into 'arr';
  // presumably *n receives the number actually popped — confirm against
  // CMMarkStack::par_pop_arr.
 510   void mark_stack_pop(oop* arr, int max, int* n) {
 511     _markStack.par_pop_arr(arr, max, n);
 512   }
 513   size_t mark_stack_size()                { return _markStack.size(); }
  // Target size when partially draining the stack: one third of its capacity.
 514   size_t partial_mark_stack_size_target() { return _markStack.maxElems()/3; }
 515   bool mark_stack_overflow()              { return _markStack.overflow(); }
 516   bool mark_stack_empty()                 { return _markStack.isEmpty(); }
 517 
 518   CMRootRegions* root_regions() { return &_root_regions; }
 519 
  // Accessors for the "concurrent marking in progress" flag; see the comment
  // on the _concurrent_marking_in_progress field for its lifetime.
 520   bool concurrent_marking_in_progress() {
 521     return _concurrent_marking_in_progress;
 522   }
 523   void set_concurrent_marking_in_progress() {
 524     _concurrent_marking_in_progress = true;
 525   }
 526   void clear_concurrent_marking_in_progress() {
 527     _concurrent_marking_in_progress = false;
 528   }
 529 
 530   bool concurrent_marking_from_roots() const {
 531     return _concurrent_marking_from_roots;
 532   }
 533 
  // Adds 'vtime' to worker i's accumulated virtual time.
 534   void update_accum_task_vtime(int i, double vtime) {
 535     _accum_task_vtime[i] += vtime;
 536   }
 537 
 538   double all_task_accum_vtime() {
 539     double ret = 0.0;
 540     for (uint i = 0; i < _max_worker_id; ++i)
 541       ret += _accum_task_vtime[i];
 542     return ret;
 543   }
 544 
 545   // Attempts to steal an object from the task queues of other tasks
 546   bool try_stealing(uint worker_id, int* hash_seed, oop& obj);
 547 
  // Construction/destruction. The two mapper arguments back the prev/next
  // marking bitmaps (per their names) — definitions are elsewhere.
 548   ConcurrentMark(G1CollectedHeap* g1h,
 549                  G1RegionToSpaceMapper* prev_bitmap_storage,
 550                  G1RegionToSpaceMapper* next_bitmap_storage);
 551   ~ConcurrentMark();
 552 
 552 




 336   WorkGangBarrierSync     _second_overflow_barrier_sync;
 337 
  // NOTE(review): the flags below are plain 'volatile', not atomics; volatile
  // alone is not a thread-synchronization primitive — presumably the code
  // relies on HotSpot's explicit memory barriers elsewhere. Confirm before
  // depending on ordering guarantees here.
 338   // This is set by any task, when an overflow on the global data
 339   // structures is detected
 340   volatile bool           _has_overflown;
 341   // True: marking is concurrent, false: we're in remark
 342   volatile bool           _concurrent;
 343   // Set at the end of a Full GC so that marking aborts
 344   volatile bool           _has_aborted;
 345 
 346   // Used when remark aborts due to an overflow to indicate that
 347   // another concurrent marking phase should start
 348   volatile bool           _restart_for_overflow;
 349 
 350   // This is true from the very start of concurrent marking until the
 351   // point when all the tasks complete their work. It is really used
 352   // to determine the points between the end of concurrent marking and
 353   // time of remark.
 354   volatile bool           _concurrent_marking_in_progress;
 355 
 356   // Keep track of whether we have started a concurrent phase or not.
  // NOTE(review): presumably set in register_concurrent_phase_start() and
  // cleared in register_concurrent_phase_end() — confirm in the definitions.
 357   volatile bool           _concurrent_phase_started;


 358 
 359   // All of these times are in ms
 360   NumberSeq _init_times;
 361   NumberSeq _remark_times;
 362   NumberSeq _remark_mark_times;
 363   NumberSeq _remark_weak_ref_times;
 364   NumberSeq _cleanup_times;
 365   double    _total_counting_time;
 366   double    _total_rs_scrub_time;
 367 
 368   double*   _accum_task_vtime;   // Accumulated task vtime
  // NOTE(review): the units of _accum_task_vtime are not stated here (the
  // "in ms" comment above covers the NumberSeq group only) — confirm at the
  // call sites of update_accum_task_vtime().
 369 
  // Worker gang used for the parallel marking phases — presumably; confirm
  // where it is allocated and sized.
 370   WorkGang* _parallel_workers;
 371 
  // Weak-reference processing performed during remark (declarations only;
  // definitions are elsewhere in the project).
 372   void weakRefsWorkParallelPart(BoolObjectClosure* is_alive, bool purged_classes);
 373   void weakRefsWork(bool clear_all_soft_refs);
 374 
  // Exchanges the roles of the prev and next marking bitmaps — presumably at
  // the end of a marking cycle; confirm in the definition.
 375   void swapMarkBitMaps();
 376 
 377   // It resets the global marking data structures, as well as the


  // Returns the task queue with the given id. The id must name one of the
  // currently active tasks (asserted); the cast narrows from the generic
  // queue type stored in the set — presumably; confirm the queue() signature.
 445   CMTaskQueue* task_queue(int id) {
 446     assert(0 <= id && id < (int) _active_tasks,
 447            "task queue id not within active bounds");
 448     return (CMTaskQueue*) _task_queues->queue(id);
 449   }
 450 
 451   // Returns the task queue set
 452   CMTaskQueueSet* task_queues()  { return _task_queues; }
 453 
 454   // Access / manipulation of the overflow flag which is set to
 455   // indicate that the global stack has overflown
 456   bool has_overflown()           { return _has_overflown; }
 457   void set_has_overflown()       { _has_overflown = true; }
 458   void clear_has_overflown()     { _has_overflown = false; }
  // True when remark decided that another concurrent marking cycle is needed.
 459   bool restart_for_overflow()    { return _restart_for_overflow; }
 460 
 461   // Methods to enter the two overflow sync barriers
 462   void enter_first_sync_barrier(uint worker_id);
 463   void enter_second_sync_barrier(uint worker_id);
 464 






 465   // Live Data Counting data structures...
 466   // These data structures are initialized at the start of
 467   // marking. They are written to while marking is active.
 468   // They are aggregated during remark; the aggregated values
 469   // are then used to populate the _region_bm, _card_bm, and
 470   // the total live bytes, which are then subsequently updated
 471   // during cleanup.
 472 
 473   // An array of bitmaps (one bit map per task). Each bitmap
 474   // is used to record the cards spanned by the live objects
 475   // marked by that task/worker.
 476   BitMap*  _count_card_bitmaps;
 477 
 478   // Used to record the number of marked live bytes
 479   // (for each region, by worker thread).
 480   size_t** _count_marked_bytes;
  // NOTE(review): the index order of the two dimensions (worker-major vs
  // region-major) is not evident from this declaration — confirm at use sites.
 481 
 482   // Card index of the bottom of the G1 heap. Used for biasing indices into
 483   // the card bitmaps.
 484   intptr_t _heap_bottom_card_num;


  // Pops up to 'max' entries from the global mark stack into 'arr';
  // presumably *n receives the number actually popped — confirm against
  // CMMarkStack::par_pop_arr.
 502   void mark_stack_pop(oop* arr, int max, int* n) {
 503     _markStack.par_pop_arr(arr, max, n);
 504   }
 505   size_t mark_stack_size()                { return _markStack.size(); }
  // Target size when partially draining the stack: one third of its capacity.
 506   size_t partial_mark_stack_size_target() { return _markStack.maxElems()/3; }
 507   bool mark_stack_overflow()              { return _markStack.overflow(); }
 508   bool mark_stack_empty()                 { return _markStack.isEmpty(); }
 509 
 510   CMRootRegions* root_regions() { return &_root_regions; }
 511 
  // Accessors for the "concurrent marking in progress" flag; see the comment
  // on the _concurrent_marking_in_progress field for its lifetime.
 512   bool concurrent_marking_in_progress() {
 513     return _concurrent_marking_in_progress;
 514   }
 515   void set_concurrent_marking_in_progress() {
 516     _concurrent_marking_in_progress = true;
 517   }
 518   void clear_concurrent_marking_in_progress() {
 519     _concurrent_marking_in_progress = false;
 520   }
 521 
  // Bracket a concurrent phase, presumably for timing/logging; 'title'
  // names the phase — confirm semantics in the definitions.
 522   void register_concurrent_phase_start(const char* title);
 523   void register_concurrent_phase_end();

 524 
  // Adds 'vtime' to worker i's accumulated virtual time.
 525   void update_accum_task_vtime(int i, double vtime) {
 526     _accum_task_vtime[i] += vtime;
 527   }
 528 
 529   double all_task_accum_vtime() {
 530     double ret = 0.0;
 531     for (uint i = 0; i < _max_worker_id; ++i)
 532       ret += _accum_task_vtime[i];
 533     return ret;
 534   }
 535 
 536   // Attempts to steal an object from the task queues of other tasks
 537   bool try_stealing(uint worker_id, int* hash_seed, oop& obj);
 538 
  // Construction/destruction. The two mapper arguments back the prev/next
  // marking bitmaps (per their names) — definitions are elsewhere.
 539   ConcurrentMark(G1CollectedHeap* g1h,
 540                  G1RegionToSpaceMapper* prev_bitmap_storage,
 541                  G1RegionToSpaceMapper* next_bitmap_storage);
 542   ~ConcurrentMark();
 543 


< prev index next >