src/share/vm/gc/g1/g1ConcurrentMark.hpp

Old version:


  size_t _max_chunk_capacity;    // Maximum number of OopChunk elements on the stack.

  OopChunk* _base;               // Bottom address of allocated memory area.
  size_t _chunk_capacity;        // Current maximum number of OopChunk elements.

  char _pad0[DEFAULT_CACHE_LINE_SIZE];
  OopChunk* volatile _free_list;  // Linked list of free chunks that can be allocated by users.
  char _pad1[DEFAULT_CACHE_LINE_SIZE - sizeof(OopChunk*)];
  OopChunk* volatile _chunk_list; // List of chunks currently containing data.
  volatile size_t _chunks_in_chunk_list;
  char _pad2[DEFAULT_CACHE_LINE_SIZE - sizeof(OopChunk*) - sizeof(size_t)];

  volatile size_t _hwm;          // High water mark within the reserved space.
  char _pad4[DEFAULT_CACHE_LINE_SIZE - sizeof(size_t)];

  // Allocate a new chunk from the reserved memory, using the high water mark. Returns
  // NULL if out of memory.
  OopChunk* allocate_new_chunk();

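The _pad0.._pad4 fields keep the independently-updated hot fields (_free_list, _chunk_list, _hwm) on separate cache lines to avoid false sharing between workers. A minimal sketch of how allocate_new_chunk can hand out chunks from the reserved space with a single atomic bump of _hwm follows; it assumes HotSpot's Atomic::add and the enclosing class name G1CMMarkStack, and illustrates the technique rather than the exact implementation:

OopChunk* G1CMMarkStack::allocate_new_chunk() {
  // A dirty read is fine here: a stale value can only under-estimate, and
  // the atomic add below re-checks the real index.
  if (_hwm >= _chunk_capacity) {
    return NULL;
  }
  size_t cur_idx = Atomic::add(1u, &_hwm) - 1;  // claim the next slot
  if (cur_idx >= _chunk_capacity) {
    return NULL;  // lost the race: reserved space exhausted
  }
  OopChunk* result = ::new (&_base[cur_idx]) OopChunk;  // placement-new in place
  result->next = NULL;
  return result;
}
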
  volatile bool _out_of_memory;

  // Atomically add the given chunk to the list.
  void add_chunk_to_list(OopChunk* volatile* list, OopChunk* elem);
  // Atomically remove and return a chunk from the given list. Returns NULL if the
  // list is empty.
  OopChunk* remove_chunk_from_list(OopChunk* volatile* list);

  void add_chunk_to_chunk_list(OopChunk* elem);
  void add_chunk_to_free_list(OopChunk* elem);

  OopChunk* remove_chunk_from_chunk_list();
  OopChunk* remove_chunk_from_free_list();
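
One way to meet the "atomically add" contract is a compare-and-swap push onto the list head (a Treiber stack). This is only a sketch: the matching remove is exposed to the classic ABA problem under concurrent pops, so a real implementation may serialize these lists with a lock instead. G1CMMarkStack as the enclosing class is again an assumption.

void G1CMMarkStack::add_chunk_to_list(OopChunk* volatile* list, OopChunk* elem) {
  OopChunk* old_head;
  do {
    old_head = *list;
    elem->next = old_head;  // link the prospective head before publishing
  } while ((OopChunk*)Atomic::cmpxchg_ptr(elem, list, old_head) != old_head);
}
// Note: a CAS-based remove_chunk_from_list would also need ABA protection
// (version tags or a lock), which is why the pop side is not sketched here.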

  bool  _should_expand;

  // Resizes the mark stack to the given new capacity. Releases any previous
  // memory if successful.
  bool resize(size_t new_capacity);




  // Marking tasks
  uint                    _max_worker_id;// Maximum worker id
  uint                    _active_tasks; // Number of tasks currently active
  G1CMTask**              _tasks;        // Task queue array (max_worker_id length)
  G1CMTaskQueueSet*       _task_queues;  // Task queue set
  ParallelTaskTerminator  _terminator;   // For termination

  // Two sync barriers that are used to synchronize tasks when an
  // overflow occurs. The algorithm is the following. All tasks enter
  // the first one to ensure that they have all stopped manipulating
  // the global data structures. After they exit it, they re-initialize
  // their data structures and task 0 re-initializes the global data
  // structures. Then, they enter the second sync barrier. This
  // ensures that no task starts doing work before all data
  // structures (local and global) have been re-initialized. When they
  // exit it, they are free to start working again.
  WorkGangBarrierSync     _first_overflow_barrier_sync;
  WorkGangBarrierSync     _second_overflow_barrier_sync;
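
Schematically, a task's overflow handling then looks like the sketch below; handle_overflow_sketch and reset_local_data are hypothetical names used only to illustrate the protocol described in the comment above.

void handle_overflow_sketch(G1ConcurrentMark* cm, uint worker_id) {
  // Barrier 1: wait until every task has stopped touching global state.
  cm->enter_first_sync_barrier(worker_id);
  // Between the barriers each task resets its local structures; task 0
  // additionally re-initializes the global ones.
  reset_local_data(worker_id);            // hypothetical task-local cleanup
  // Barrier 2: nobody resumes before all re-initialization is complete.
  cm->enter_second_sync_barrier(worker_id);
  // From here on it is safe to start marking again.
}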

  // This is set by any task when an overflow on the global data
  // structures is detected.
  volatile bool           _has_overflown;
  // True: marking is concurrent, false: we're in remark
  volatile bool           _concurrent;
  // Set at the end of a Full GC so that marking aborts
  volatile bool           _has_aborted;

  // Used when remark aborts due to an overflow to indicate that
  // another concurrent marking phase should start
  volatile bool           _restart_for_overflow;

  // This is true from the very start of concurrent marking until the
  // point when all the tasks complete their work. It is mainly used
  // to determine the interval between the end of concurrent marking
  // and the remark pause.
  volatile bool           _concurrent_marking_in_progress;

  ConcurrentGCTimer*      _gc_timer_cm;

  G1OldTracer*            _gc_tracer_cm;

  // All of these times are in ms


  NumberSeq _remark_weak_ref_times;
  NumberSeq _cleanup_times;
  double    _total_counting_time;
  double    _total_rs_scrub_time;

  double*   _accum_task_vtime;   // Accumulated task vtime

  WorkGang* _parallel_workers;

  void weakRefsWorkParallelPart(BoolObjectClosure* is_alive, bool purged_classes);
  void weakRefsWork(bool clear_all_soft_refs);

  void swapMarkBitMaps();

  // Resets the global marking data structures, as well as the
  // task-local ones; should be called during initial mark.
  void reset();

  // Resets all the marking data structures. Called when we have to restart
  // marking or when marking completes (via set_non_marking_state below).
  void reset_marking_state(bool clear_overflow = true);

  // We do this after we're done with marking so that the marking data
  // structures are initialized to a sensible and predictable state.
  void set_non_marking_state();

  // Called to indicate how many threads are currently active.
  void set_concurrency(uint active_tasks);

  // Should be called to indicate which phase we're in (concurrent
  // mark or remark) and how many threads are currently active.
  void set_concurrency_and_phase(uint active_tasks, bool concurrent);

  // Prints all gathered CM-related statistics
  void print_stats();

  bool cleanup_list_is_empty() {
    return _cleanup_list.is_empty();
  }

  // Accessor methods



  // Returns the task with the given id
  G1CMTask* task(int id) {
    assert(0 <= id && id < (int) _active_tasks,
           "task id not within active bounds");
    return _tasks[id];
  }

  // Returns the task queue with the given id
  G1CMTaskQueue* task_queue(int id) {
    assert(0 <= id && id < (int) _active_tasks,
           "task queue id not within active bounds");
    return (G1CMTaskQueue*) _task_queues->queue(id);
  }

  // Returns the task queue set
  G1CMTaskQueueSet* task_queues()  { return _task_queues; }

  // Access / manipulation of the overflow flag which is set to
  // indicate that the global stack has overflown
  bool has_overflown()           { return _has_overflown; }
  void set_has_overflown()       { _has_overflown = true; }
  void clear_has_overflown()     { _has_overflown = false; }
  bool restart_for_overflow()    { return _restart_for_overflow; }

  // Methods to enter the two overflow sync barriers
  void enter_first_sync_barrier(uint worker_id);
  void enter_second_sync_barrier(uint worker_id);

  // Card index of the bottom of the G1 heap. Used for biasing indices into
  // the card bitmaps.
  intptr_t _heap_bottom_card_num;

  // Set to true when initialization is complete
  bool _completed_initialization;

  // If end_timer is true, end the GC timer after ending the concurrent phase.
  void register_concurrent_phase_end_common(bool end_timer);

  // Clear the given bitmap in parallel using the given WorkGang. If may_yield is
  // true, periodically insert checks to see if this method should exit prematurely.
  void clear_bitmap(G1CMBitMap* bitmap, WorkGang* workers, bool may_yield);
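
A rough sketch of how such a parallel, yieldable clear can be organized is shown below; the task class and the claim/clear/yield helpers are hypothetical placeholders for illustration, not the HotSpot code.

// Assumed helpers, for illustration only:
bool claim_next_chunk(size_t* chunk);               // claim a disjoint bitmap chunk
void clear_chunk(G1CMBitMap* bitmap, size_t chunk); // clear the bits of one chunk
bool should_yield();                                // check for a pending yield request

class G1ClearBitmapTaskSketch : public AbstractGangTask {
  G1CMBitMap* _bitmap;
  bool        _may_yield;
public:
  G1ClearBitmapTaskSketch(G1CMBitMap* bitmap, bool may_yield) :
    AbstractGangTask("Clear Bitmap (sketch)"), _bitmap(bitmap), _may_yield(may_yield) { }

  void work(uint worker_id) {
    size_t chunk;
    // Workers repeatedly claim disjoint chunks of the bitmap and clear them.
    while (claim_next_chunk(&chunk)) {
      clear_chunk(_bitmap, chunk);
      if (_may_yield && should_yield()) {
        return;  // exit prematurely; a later invocation finishes the rest
      }
    }
  }
};
// A WorkGang would drive this via workers->run_task(&task).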
public:
  // Manipulation of the global mark stack.
  // The push and pop operations are used by tasks for transfers
  // between task-local queues and the global mark stack.
  bool mark_stack_push(oop* arr) {
    if (!_global_mark_stack.par_push_chunk(arr)) {
      set_has_overflown();
      return false;
    }
    return true;
  }
  bool mark_stack_pop(oop* arr) {
    return _global_mark_stack.par_pop_chunk(arr);
  }
  size_t mark_stack_size()                { return _global_mark_stack.size(); }
  size_t partial_mark_stack_size_target() { return _global_mark_stack.capacity()/3; }
  bool mark_stack_overflow()              { return _global_mark_stack.is_out_of_memory(); }
  bool mark_stack_empty()                 { return _global_mark_stack.is_empty(); }
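
For context, tasks move entries to and from the global stack in whole chunks. A hedged usage sketch follows; the function names and the assumption that buffer holds exactly one chunk's worth of oops are illustrative:

void drain_buffer_to_global_stack_sketch(G1ConcurrentMark* cm, oop* buffer) {
  if (!cm->mark_stack_push(buffer)) {
    // The push failed and the overflow flag is now set; the task should
    // back off and let the overflow protocol restart marking.
    return;
  }
  // Entries are now safely on the global mark stack.
}

bool refill_buffer_from_global_stack_sketch(G1ConcurrentMark* cm, oop* buffer) {
  return cm->mark_stack_pop(buffer);  // false when the global stack is empty
}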

  G1CMRootRegions* root_regions() { return &_root_regions; }

  bool concurrent_marking_in_progress() {
    return _concurrent_marking_in_progress;
  }
  void set_concurrent_marking_in_progress() {
    _concurrent_marking_in_progress = true;
  }
  void clear_concurrent_marking_in_progress() {
    _concurrent_marking_in_progress = false;
  }

  void concurrent_cycle_start();
  void concurrent_cycle_end();

  void update_accum_task_vtime(int i, double vtime) {
    _accum_task_vtime[i] += vtime;
  }

New version:


  size_t _max_chunk_capacity;    // Maximum number of OopChunk elements on the stack.

  OopChunk* _base;               // Bottom address of allocated memory area.
  size_t _chunk_capacity;        // Current maximum number of OopChunk elements.

  char _pad0[DEFAULT_CACHE_LINE_SIZE];
  OopChunk* volatile _free_list;  // Linked list of free chunks that can be allocated by users.
  char _pad1[DEFAULT_CACHE_LINE_SIZE - sizeof(OopChunk*)];
  OopChunk* volatile _chunk_list; // List of chunks currently containing data.
  volatile size_t _chunks_in_chunk_list;
  char _pad2[DEFAULT_CACHE_LINE_SIZE - sizeof(OopChunk*) - sizeof(size_t)];

  volatile size_t _hwm;          // High water mark within the reserved space.
  char _pad4[DEFAULT_CACHE_LINE_SIZE - sizeof(size_t)];

  // Allocate a new chunk from the reserved memory, using the high water mark. Returns
  // NULL if out of memory.
  OopChunk* allocate_new_chunk();

  // This is set by any task when an overflow on the global data
  // structures is detected.
  volatile bool _out_of_memory;

  // Atomically add the given chunk to the list.
  void add_chunk_to_list(OopChunk* volatile* list, OopChunk* elem);
  // Atomically remove and return a chunk from the given list. Returns NULL if the
  // list is empty.
  OopChunk* remove_chunk_from_list(OopChunk* volatile* list);

  void add_chunk_to_chunk_list(OopChunk* elem);
  void add_chunk_to_free_list(OopChunk* elem);

  OopChunk* remove_chunk_from_chunk_list();
  OopChunk* remove_chunk_from_free_list();

  bool  _should_expand;

  // Resizes the mark stack to the given new capacity. Releases any previous
  // memory if successful.
  bool resize(size_t new_capacity);




  // Marking tasks
  uint                    _max_worker_id;// Maximum worker id
  uint                    _active_tasks; // Number of tasks currently active
  G1CMTask**              _tasks;        // Task queue array (max_worker_id length)
  G1CMTaskQueueSet*       _task_queues;  // Task queue set
  ParallelTaskTerminator  _terminator;   // For termination

  // Two sync barriers that are used to synchronize tasks when an
  // overflow occurs. The algorithm is the following. All tasks enter
  // the first one to ensure that they have all stopped manipulating
  // the global data structures. After they exit it, they re-initialize
  // their data structures and task 0 re-initializes the global data
  // structures. Then, they enter the second sync barrier. This
  // ensures that no task starts doing work before all data
  // structures (local and global) have been re-initialized. When they
  // exit it, they are free to start working again.
  WorkGangBarrierSync     _first_overflow_barrier_sync;
  WorkGangBarrierSync     _second_overflow_barrier_sync;




  // True: marking is concurrent, false: we're in remark
  volatile bool           _concurrent;
  // Set at the end of a Full GC so that marking aborts
  volatile bool           _has_aborted;

  // Used when remark aborts due to an overflow to indicate that
  // another concurrent marking phase should start
  volatile bool           _restart_for_overflow;

  // This is true from the very start of concurrent marking until the
  // point when all the tasks complete their work. It is mainly used
  // to determine the interval between the end of concurrent marking
  // and the remark pause.
  volatile bool           _concurrent_marking_in_progress;

  ConcurrentGCTimer*      _gc_timer_cm;

  G1OldTracer*            _gc_tracer_cm;

  // All of these times are in ms


  NumberSeq _remark_weak_ref_times;
  NumberSeq _cleanup_times;
  double    _total_counting_time;
  double    _total_rs_scrub_time;

  double*   _accum_task_vtime;   // Accumulated task vtime

  WorkGang* _parallel_workers;

  void weakRefsWorkParallelPart(BoolObjectClosure* is_alive, bool purged_classes);
  void weakRefsWork(bool clear_all_soft_refs);

  void swapMarkBitMaps();

  // Resets the global marking data structures, as well as the
  // task-local ones; should be called during initial mark.
  void reset();

  // Resets all the marking data structures. Called when we have to restart
  // marking or when marking completes (via set_non_marking_state below).
  void reset_marking_state();

  // We do this after we're done with marking so that the marking data
  // structures are initialized to a sensible and predictable state.
  void set_non_marking_state();

  // Called to indicate how many threads are currently active.
  void set_concurrency(uint active_tasks);

  // Should be called to indicate which phase we're in (concurrent
  // mark or remark) and how many threads are currently active.
  void set_concurrency_and_phase(uint active_tasks, bool concurrent);

  // Prints all gathered CM-related statistics
  void print_stats();

  bool cleanup_list_is_empty() {
    return _cleanup_list.is_empty();
  }

  // Accessor methods



  // Returns the task with the given id
  G1CMTask* task(int id) {
    assert(0 <= id && id < (int) _active_tasks,
           "task id not within active bounds");
    return _tasks[id];
  }

  // Returns the task queue with the given id
  G1CMTaskQueue* task_queue(int id) {
    assert(0 <= id && id < (int) _active_tasks,
           "task queue id not within active bounds");
    return (G1CMTaskQueue*) _task_queues->queue(id);
  }

  // Returns the task queue set
  G1CMTaskQueueSet* task_queues()  { return _task_queues; }

  // Access / manipulation of the overflow flag which is set to
  // indicate that the global stack has overflown
  bool has_overflown()           { return _global_mark_stack.is_out_of_memory(); }
  void clear_has_overflown()     { _global_mark_stack.clear_out_of_memory(); }
  bool restart_for_overflow()    { return _restart_for_overflow; }

  // Methods to enter the two overflow sync barriers
  void enter_first_sync_barrier(uint worker_id);
  void enter_second_sync_barrier(uint worker_id);

  // Card index of the bottom of the G1 heap. Used for biasing indices into
  // the card bitmaps.
  intptr_t _heap_bottom_card_num;

  // Set to true when initialization is complete
  bool _completed_initialization;

  // If end_timer is true, end the GC timer after ending the concurrent phase.
  void register_concurrent_phase_end_common(bool end_timer);

  // Clear the given bitmap in parallel using the given WorkGang. If may_yield is
  // true, periodically insert checks to see if this method should exit prematurely.
  void clear_bitmap(G1CMBitMap* bitmap, WorkGang* workers, bool may_yield);
public:
  // Manipulation of the global mark stack.
  // The push and pop operations are used by tasks for transfers
  // between task-local queues and the global mark stack.
  bool mark_stack_push(oop* arr) {
    return _global_mark_stack.par_push_chunk(arr);
  }
  bool mark_stack_pop(oop* arr) {
    return _global_mark_stack.par_pop_chunk(arr);
  }
  size_t mark_stack_size()                { return _global_mark_stack.size(); }
  size_t partial_mark_stack_size_target() { return _global_mark_stack.capacity()/3; }
  bool mark_stack_empty()                 { return _global_mark_stack.is_empty(); }
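
Note the behavioral change against the old version: mark_stack_push no longer sets the overflow flag itself. The out-of-memory condition is recorded inside the mark stack and read back through has_overflown(), so a caller now only propagates the push result. A minimal caller-side sketch under that reading (function name illustrative):

void push_or_report_overflow_sketch(G1ConcurrentMark* cm, oop* buffer) {
  if (!cm->mark_stack_push(buffer)) {
    // par_push_chunk recorded out-of-memory internally; has_overflown()
    // observes that same state, so no explicit set is needed here.
    assert(cm->has_overflown(), "push failure should imply overflow");
  }
}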

  G1CMRootRegions* root_regions() { return &_root_regions; }

  bool concurrent_marking_in_progress() {
    return _concurrent_marking_in_progress;
  }
  void set_concurrent_marking_in_progress() {
    _concurrent_marking_in_progress = true;
  }
  void clear_concurrent_marking_in_progress() {
    _concurrent_marking_in_progress = false;
  }

  void concurrent_cycle_start();
  void concurrent_cycle_end();

  void update_accum_task_vtime(int i, double vtime) {
    _accum_task_vtime[i] += vtime;
  }