
src/share/vm/gc/g1/g1ConcurrentMark.hpp

rev 11935 : imported patch 8159422-high-mark-stack-contention
rev 11936 : imported patch 8159422-mikael-review


 132     return mark_distance();
 133   }
 134 
 135   G1CMBitMap() : G1CMBitMapRO(LogMinObjAlignment), _listener() { _listener.set_bitmap(this); }
 136 
 137   // Initializes the underlying BitMap to cover the given area.
 138   void initialize(MemRegion heap, G1RegionToSpaceMapper* storage);
 139 
 140   // Write marks.
 141   inline void mark(HeapWord* addr);
 142   inline void clear(HeapWord* addr);
 143   inline bool parMark(HeapWord* addr);
 144 
 145   void clear_range(MemRegion mr);
 146 };
 147 
 148 // Represents the overflow mark stack used by concurrent marking.
 149 //
 150 // Stores oops in a huge buffer in virtual memory that is always fully committed.
 151 // Resizing may only happen during a STW pause when the stack is empty.
 152 class G1CMMarkStack VALUE_OBJ_CLASS_SPEC {
 153   ReservedSpace _reserved_space; // Space currently reserved for the mark stack.
 154 
 155   oop* _base;                    // Bottom address of allocated memory area.
 156   size_t _capacity;              // Maximum number of elements.
 157   size_t _index;                 // One more than last occupied index.
 158 
 159   size_t _saved_index;           // Value of _index saved at start of GC to detect mark stack modifications during that time.
 160 
 161   bool  _overflow;
 162   bool  _should_expand;
 163 
 164   // Resizes the mark stack to the given new capacity. Releases any previous
 165   // memory if successful.
 166   bool resize(size_t new_capacity);
 167 
 168   bool stack_modified() const { return _index != _saved_index; }
 169  public:
 170   G1CMMarkStack();
 171   ~G1CMMarkStack();
 172 
 173   bool allocate(size_t capacity);
 174 
 175   // Pushes the first "n" elements of the given buffer on the stack.
 176   void par_push_arr(oop* buffer, size_t n);
 177 
 178   // Moves up to max elements from the stack into the given buffer, storing
 179   // the number of elements copied in *n. Returns true if the buffer contains
 180   // at least one element, false if the stack was empty.
 181   bool par_pop_arr(oop* buffer, size_t max, size_t* n);
 182 
 183   bool is_empty() const { return _index == 0; }
 184   size_t capacity() const  { return _capacity; }
 185 
 186   bool overflow() const { return _overflow; }
 187   void clear_overflow() { _overflow = false; }
 188 
 189   bool should_expand() const { return _should_expand; }
 190   void set_should_expand(bool value) { _should_expand = value; }
 191 
 192   // Expand the stack, typically in response to an overflow condition
 193   void expand();
 194 
 195   size_t size() const { return _index; }
 196 
 197   void set_empty() { _index = 0; clear_overflow(); }
 198 
 199   // Record the current index.
 200   void note_start_of_gc();
 201 
 202   // Make sure that we have not added any entries to the stack during GC.
 203   void note_end_of_gc();
 204 
 205   // Apply fn to each oop in the mark stack, up to the bound recorded
 206   // via one of the above "note" functions.  The mark stack must not
 207   // be modified while iterating.
 208   template<typename Fn> void iterate(Fn fn);
 209 };
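
As context for this webrev: every par_push_arr/par_pop_arr transfer above goes through a single lock and a single bump index, which is the mark stack contention that 8159422 addresses. A minimal sketch of this array-based scheme, using C++11 std::mutex and std::memcpy in place of HotSpot's own primitives (all names below are illustrative, not the actual implementation):

    #include <cstddef>
    #include <cstring>
    #include <mutex>

    typedef void* oop; // illustrative stand-in for HotSpot's oop

    class ArrayMarkStackSketch {
      oop*       _base;     // bottom of the allocated memory area
      size_t     _capacity; // maximum number of elements
      size_t     _index;    // one more than the last occupied index
      bool       _overflow; // set when a push does not fit
      std::mutex _lock;     // every transfer serializes on this one lock

    public:
      // Push the first n elements of buffer; flag overflow if they do not fit.
      void par_push_arr(oop* buffer, size_t n) {
        std::lock_guard<std::mutex> guard(_lock);
        if (_index + n > _capacity) {
          _overflow = true; // caller checks overflow() afterwards
          return;
        }
        std::memcpy(_base + _index, buffer, n * sizeof(oop));
        _index += n;
      }

      // Pop up to max elements into buffer, reporting the count via *n.
      // Returns true if at least one element was copied out.
      bool par_pop_arr(oop* buffer, size_t max, size_t* n) {
        std::lock_guard<std::mutex> guard(_lock);
        size_t k = (_index < max) ? _index : max;
        _index -= k;
        std::memcpy(buffer, _base + _index, k * sizeof(oop));
        *n = k;
        return k > 0;
      }
    };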
 210 
 211 // Root Regions are regions that are not empty at the beginning of a
 212 // marking cycle and which we might collect during an evacuation pause
 213 // while the cycle is active. Given that, during evacuation pauses, we
 214 // do not copy objects that are explicitly marked, what we have to do
 215 // for the root regions is to scan them and mark all objects reachable
 216 // from them. According to the SATB assumptions, we only need to visit
 217 // each object once during marking. So, as long as we finish this scan
 218 // before the next evacuation pause, we can copy the objects from the
 219 // root regions without having to mark them or do anything else to them.
 220 //
 221 // Currently, we only support root region scanning once (at the start
 222 // of the marking cycle) and the root regions are all the survivor
 223 // regions populated during the initial-mark pause.
 224 class G1CMRootRegions VALUE_OBJ_CLASS_SPEC {
 225 private:
 226   const G1SurvivorRegions* _survivors;
 227   G1ConcurrentMark*        _cm;
 228 


 261   // have been claimed.
 262   void scan_finished();
 263 
 264   // If CM threads are still scanning root regions, wait until they
 265   // are done. Return true if we had to wait, false otherwise.
 266   bool wait_until_scan_finished();
 267 };
 268 
 269 class ConcurrentMarkThread;
 270 
 271 class G1ConcurrentMark: public CHeapObj<mtGC> {
 272   friend class ConcurrentMarkThread;
 273   friend class G1ParNoteEndTask;
 274   friend class G1VerifyLiveDataClosure;
 275   friend class G1CMRefProcTaskProxy;
 276   friend class G1CMRefProcTaskExecutor;
 277   friend class G1CMKeepAliveAndDrainClosure;
 278   friend class G1CMDrainMarkingStackClosure;
 279   friend class G1CMBitMapClosure;
 280   friend class G1CMConcurrentMarkingTask;
 281   friend class G1CMMarkStack;
 282   friend class G1CMRemarkTask;
 283   friend class G1CMTask;
 284 
 285 protected:
 286   ConcurrentMarkThread* _cmThread;   // The thread doing the work
 287   G1CollectedHeap*      _g1h;        // The heap
 288   uint                  _parallel_marking_threads; // The number of marking
 289                                                    // threads we're using
 290   uint                  _max_parallel_marking_threads; // Max number of marking
 291                                                        // threads we'll ever use
 292   double                _sleep_factor; // How much we have to sleep, with
 293                                        // respect to the work we just did, to
 294                                        // meet the marking overhead goal
 295   double                _marking_task_overhead; // Marking target overhead for
 296                                                 // a single task
 297 
 298   FreeRegionList        _cleanup_list;
 299 
 300   // Concurrent marking support structures
 301   G1CMBitMap              _markBitMap1;


 462   // Methods to enter the two overflow sync barriers
 463   void enter_first_sync_barrier(uint worker_id);
 464   void enter_second_sync_barrier(uint worker_id);
 465 
 466   // Card index of the bottom of the G1 heap. Used for biasing indices into
 467   // the card bitmaps.
 468   intptr_t _heap_bottom_card_num;
 469 
 470   // Set to true when initialization is complete
 471   bool _completed_initialization;
 472 
 473   // If end_timer is true, the GC timer is ended after ending the concurrent phase.
 474   void register_concurrent_phase_end_common(bool end_timer);
 475 
 476   // Clear the given bitmap in parallel using the given WorkGang. If may_yield is
 477   // true, periodically insert checks to see if this method should exit prematurely.
 478   void clear_bitmap(G1CMBitMap* bitmap, WorkGang* workers, bool may_yield);
 479 public:
 480   // Manipulation of the global mark stack.
 481   // The push and pop operations are used by tasks for transfers
 482   // between task-local queues and the global mark stack, and use
 483   // locking for concurrency safety.
 484   bool mark_stack_push(oop* arr, size_t n) {
 485     _global_mark_stack.par_push_arr(arr, n);
 486     if (_global_mark_stack.overflow()) {
 487       set_has_overflown();
 488       return false;
 489     }
 490     return true;
 491   }
 492   void mark_stack_pop(oop* arr, size_t max, size_t* n) {
 493     _global_mark_stack.par_pop_arr(arr, max, n);
 494   }
 495   size_t mark_stack_size()                { return _global_mark_stack.size(); }
 496   size_t partial_mark_stack_size_target() { return _global_mark_stack.capacity()/3; }
 497   bool mark_stack_overflow()              { return _global_mark_stack.overflow(); }
 498   bool mark_stack_empty()                 { return _global_mark_stack.is_empty(); }
 499 
 500   G1CMRootRegions* root_regions() { return &_root_regions; }
 501 
 502   bool concurrent_marking_in_progress() {
 503     return _concurrent_marking_in_progress;
 504   }
 505   void set_concurrent_marking_in_progress() {
 506     _concurrent_marking_in_progress = true;
 507   }
 508   void clear_concurrent_marking_in_progress() {
 509     _concurrent_marking_in_progress = false;
 510   }
 511 
 512   void concurrent_cycle_start();
 513   void concurrent_cycle_end();
 514 
 515   void update_accum_task_vtime(int i, double vtime) {
 516     _accum_task_vtime[i] += vtime;
 517   }


 584 
 585   // Do concurrent phase of marking, to a tentative transitive closure.
 586   void mark_from_roots();
 587 
 588   void checkpointRootsFinal(bool clear_all_soft_refs);
 589   void checkpointRootsFinalWork();
 590   void cleanup();
 591   void complete_cleanup();
 592 
 593   // Mark in the previous bitmap.  NB: this is usually read-only, so use
 594   // this carefully!
 595   inline void markPrev(oop p);
 596 
 597   // Clears marks for all objects in the given range, for the prev or
 598   // next bitmaps.  NB: the previous bitmap is usually
 599   // read-only, so use this carefully!
 600   void clearRangePrevBitmap(MemRegion mr);
 601 
 602   // Notify data structures that a GC has started.
 603   void note_start_of_gc() {
 604     _global_mark_stack.note_start_of_gc();
 605   }
 606 
 607   // Notify data structures that a GC is finished.
 608   void note_end_of_gc() {
 609     _global_mark_stack.note_end_of_gc();
 610   }
 611 
 612   // Verify that there are no CSet oops on the stacks (taskqueues /
 613   // global mark stack) and fingers (global / per-task).
 614   // If marking is not in progress, it's a no-op.
 615   void verify_no_cset_oops() PRODUCT_RETURN;
 616 
 617   inline bool isPrevMarked(oop p) const;
 618 
 619   inline bool do_yield_check();
 620 
 621   // Abandon current marking iteration due to a Full GC.
 622   void abort();
 623 
 624   bool has_aborted()      { return _has_aborted; }
 625 
 626   void print_summary_info();
 627 
 628   void print_worker_threads_on(outputStream* st) const;
 629   void threads_do(ThreadClosure* tc) const;


 653   // Aggregates the per-card liveness data based on the current marking. Also sets
 654   // the amount of marked bytes for each region.
 655   void create_live_data();
 656 
 657   void finalize_live_data();
 658 
 659   void verify_live_data();
 660 };
 661 
 662 // A class representing a marking task.
 663 class G1CMTask : public TerminatorTerminator {
 664 private:
 665   enum PrivateConstants {
 666     // The regular clock call is called once the number of scanned words reaches
 667     // this limit
 668     words_scanned_period          = 12*1024,
 669     // The regular clock call is called once the number of visited
 670     // references reaches this limit
 671     refs_reached_period           = 384,
 672     // Initial value for the hash seed, used in the work stealing code
 673     init_hash_seed                = 17,
 674     // How many entries will be transferred between global stack and
 675     // local queues at once.
 676     global_stack_transfer_size    = 1024
 677   };
 678 
 679   uint                        _worker_id;
 680   G1CollectedHeap*            _g1h;
 681   G1ConcurrentMark*           _cm;
 682   G1CMBitMap*                 _nextMarkBitMap;
 683   // the task queue of this task
 684   G1CMTaskQueue*              _task_queue;
 685 private:
 686   // the task queue set---needed for stealing
 687   G1CMTaskQueueSet*           _task_queues;
 688   // indicates whether the task has been claimed---this is only for
 689   // debugging purposes
 690   bool                        _claimed;
 691 
 692   // number of calls to this task
 693   int                         _calls;
 694 
 695   // when the virtual timer reaches this time, the marking step should
 696   // exit


 841 
 842   // Increment the number of references this task has visited.
 843   void increment_refs_reached() { ++_refs_reached; }
 844 
 845   // Grey the object by marking it.  If not already marked, push it on
 846   // the local queue if below the finger.
 847   // obj is below its region's NTAMS.
 848   inline void make_reference_grey(oop obj);
 849 
 850   // Grey the object (by calling make_reference_grey) if required,
 851   // e.g. obj is below its containing region's NTAMS.
 852   // Precondition: obj is a valid heap object.
 853   inline void deal_with_reference(oop obj);
 854 
 855   // It scans an object and visits its children.
 856   inline void scan_object(oop obj);
 857 
 858   // It pushes an object on the local queue.
 859   inline void push(oop obj);
 860 
 861   // These two move entries to/from the global stack.
 862   void move_entries_to_global_stack();
 863   void get_entries_from_global_stack();
 864 
 865   // It pops and scans objects from the local queue. If partially is
 866   // true, then it stops when the queue size drops below a given limit. If
 867   // partially is false, then it stops when the queue is empty.
 868   void drain_local_queue(bool partially);
 869   // It moves entries from the global stack to the local queue and
 870   // drains the local queue. If partially is true, then it stops when
 871   // both the global stack and the local queue reach a given size. If
 872   // partially is false, it tries to empty them totally.
 873   void drain_global_stack(bool partially);
 874   // It keeps picking SATB buffers and processing them until no SATB
 875   // buffers are available.
 876   void drain_satb_buffers();
 877 
 878   // moves the local finger to a new location
 879   inline void move_finger_to(HeapWord* new_finger) {
 880     assert(new_finger >= _finger && new_finger < _region_limit, "invariant");
 881     _finger = new_finger;
 882   }
 883 
 132     return mark_distance();
 133   }
 134 
 135   G1CMBitMap() : G1CMBitMapRO(LogMinObjAlignment), _listener() { _listener.set_bitmap(this); }
 136 
 137   // Initializes the underlying BitMap to cover the given area.
 138   void initialize(MemRegion heap, G1RegionToSpaceMapper* storage);
 139 
 140   // Write marks.
 141   inline void mark(HeapWord* addr);
 142   inline void clear(HeapWord* addr);
 143   inline bool parMark(HeapWord* addr);
 144 
 145   void clear_range(MemRegion mr);
 146 };
 147 
 148 // Represents the overflow mark stack used by concurrent marking.
 149 //
 150 // Stores oops in a huge buffer in virtual memory that is always fully committed.
 151 // Resizing may only happen during a STW pause when the stack is empty.
 152 //
 153 // Memory is allocated on a "chunk" basis, i.e. a set of oops. For this, the mark
 154 // stack memory is split into evenly sized chunks of oops. Users can only
 155   // add and remove entries on that basis.
 156   // Chunks are filled in increasing address order. Chunks that are not
 157   // completely filled are terminated by a NULL element.
 158 //
 159 // Every chunk has a header containing a single pointer element used for memory
 160 // management. This wastes some space, but is negligible (< .1% with current sizing).
 161 //
 162 // Memory management is done using a mix of tracking a high water-mark indicating
 163 // that all chunks at a lower address are valid chunks, and a singly linked free
 164 // list connecting all empty chunks.
 165 class G1CMMarkStack VALUE_OBJ_CLASS_SPEC {
 166 public:
 167   // Number of oops that can fit in a single chunk.
 168   static const size_t OopsPerChunk = 1024 - 1 /* One reference for the next pointer */;
 169 private:
 170   struct OopChunk {
 171     OopChunk* next;
 172     oop data[OopsPerChunk];
 173   };
 174 
 175   size_t _max_chunk_capacity;    // Maximum number of OopChunk elements on the stack.
 176 
 177   OopChunk* _base;               // Bottom address of allocated memory area.
 178   size_t _chunk_capacity;        // Current maximum number of OopChunk elements.
 179 
 180   char _pad0[DEFAULT_CACHE_LINE_SIZE];
 181   OopChunk* volatile _free_list;  // Linked list of free chunks that can be allocated by users.
 182   char _pad1[DEFAULT_CACHE_LINE_SIZE - sizeof(OopChunk*)];
 183   OopChunk* volatile _chunk_list; // List of chunks currently containing data.
 184   char _pad2[DEFAULT_CACHE_LINE_SIZE - sizeof(OopChunk*)];
 185  
 186   size_t volatile _chunks_in_chunk_list;
 187   char _pad3[DEFAULT_CACHE_LINE_SIZE - sizeof(size_t)];
 188  
 189   volatile size_t _hwm;          // High water mark within the reserved space.
 190   char _pad4[DEFAULT_CACHE_LINE_SIZE - sizeof(size_t)];
 191  
 192   // Allocate a new chunk from the reserved memory, using the high water mark. Returns
 193   // NULL if out of memory.
 194   OopChunk* allocate_new_chunk();
 195 
 196   bool  _out_of_memory;
 197 
 198   // Atomically add the given chunk to the list.
 199   void add_chunk_to_list(OopChunk* volatile* list, OopChunk* elem);
 200   // Atomically remove and return a chunk from the given list. Returns NULL if the
 201   // list is empty.
 202   OopChunk* remove_chunk_from_list(OopChunk* volatile* list);
 203   bool  _should_expand;
 204   // Resizes the mark stack to the given new capacity. Releases any previous
 205   // memory if successful.
 206   bool resize(size_t new_capacity);
 207 
 208  public:
 209   G1CMMarkStack();
 210   ~G1CMMarkStack();
 211 
 212   // Alignment and minimum capacity of this mark stack in number of oops.
 213   static size_t capacity_alignment();
 214 
 215   // Allocate and initialize the mark stack with the given number of oops.
 216   bool initialize(size_t initial_capacity, size_t max_capacity);
 217 
 218   // Pushes the given buffer containing at most OopsPerChunk elements on the mark
 219   // stack. If fewer than OopsPerChunk elements are to be pushed, the array must
 220   // be terminated with a NULL.
 221   void par_push_chunk(oop* buffer);
 222 
 223   // Pops a chunk from this mark stack, copying its contents into the given
 224   // buffer. This chunk may contain up to OopsPerChunk elements. If there are
 225   // fewer, the last element in the array is a NULL pointer.
 226   bool par_pop_chunk(oop* buffer);
 227 
 228   bool is_empty() const { return _chunk_list == NULL && _chunks_in_chunk_list == 0; }
 229 
 230   size_t capacity() const  { return _chunk_capacity; }
 231 
 232   bool is_out_of_memory() const { return _out_of_memory; }
 233   void clear_out_of_memory() { _out_of_memory = false; }
 234 
 235   bool should_expand() const { return _should_expand; }
 236   void set_should_expand(bool value) { _should_expand = value; }
 237 
 238   // Expand the stack, typically in response to an overflow condition
 239   void expand();
 240 
 241   // Return the approximate number of oops on this mark stack. Racy due to
 242   // unsynchronized access to _chunks_in_chunk_list.
 243   size_t size() const { return _chunks_in_chunk_list * OopsPerChunk; }
 244  
 245   void set_empty();
 246 
 247   // Apply Fn to every oop on the mark stack. The mark stack must not
 248   // be modified while iterating.
 249   template<typename Fn> void iterate(Fn fn) PRODUCT_RETURN;
 250 };
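
A minimal sketch of the chunk management described in the class comment above: fresh chunks are carved out of the reserved area via the high water mark, empty chunks are recycled through a lock-free free list, and filled chunks are published on a lock-free chunk list. std::atomic stands in for HotSpot's Atomic::cmpxchg_ptr, all names are illustrative, and the sketch deliberately ignores the ABA hazard that a production lock-free pop has to consider:

    #include <atomic>
    #include <cstddef>

    typedef void* oop; // illustrative stand-in for HotSpot's oop

    struct ChunkStackSketch {
      static const size_t OopsPerChunk = 1024 - 1; // one slot pays for 'next'

      struct OopChunk {
        OopChunk* next;
        oop       data[OopsPerChunk];
      };

      OopChunk*              _base;           // bottom of the reserved area
      size_t                 _chunk_capacity; // total usable chunks
      std::atomic<size_t>    _hwm;            // chunks below this index are in use
      std::atomic<OopChunk*> _free_list;      // recycled, currently empty chunks
      std::atomic<OopChunk*> _chunk_list;     // chunks currently containing data

      // Carve a never-used chunk out of the reserved area; NULL if exhausted.
      OopChunk* allocate_new_chunk() {
        size_t idx = _hwm.fetch_add(1);
        return (idx < _chunk_capacity) ? &_base[idx] : NULL;
      }

      // The "mix" the comment describes: recycle from the free list first and
      // bump the high water mark only when the free list is empty.
      OopChunk* acquire_chunk() {
        OopChunk* c = remove_chunk_from_list(&_free_list);
        return (c != NULL) ? c : allocate_new_chunk();
      }

      // Lock-free LIFO push onto either list.
      void add_chunk_to_list(std::atomic<OopChunk*>* list, OopChunk* elem) {
        OopChunk* head = list->load();
        do {
          elem->next = head; // a failed CAS refreshes head; retry
        } while (!list->compare_exchange_weak(head, elem));
      }

      // Lock-free LIFO pop; returns NULL if the list is empty.
      OopChunk* remove_chunk_from_list(std::atomic<OopChunk*>* list) {
        OopChunk* head = list->load();
        while (head != NULL && !list->compare_exchange_weak(head, head->next)) {
          // a failed CAS refreshes head; retry until success or empty
        }
        return head;
      }
    };

With transfers batched into chunks, concurrent tasks contend on one CAS per 1023 oops rather than on one lock per transfer; the _pad fields in the real class additionally keep the list heads and counters on separate cache lines, so the CAS loops of different threads do not false-share.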
 251 
 252 // Root Regions are regions that are not empty at the beginning of a
 253 // marking cycle and which we might collect during an evacuation pause
 254 // while the cycle is active. Given that, during evacuation pauses, we
 255 // do not copy objects that are explicitly marked, what we have to do
 256 // for the root regions is to scan them and mark all objects reachable
 257 // from them. According to the SATB assumptions, we only need to visit
 258 // each object once during marking. So, as long as we finish this scan
 259 // before the next evacuation pause, we can copy the objects from the
 260 // root regions without having to mark them or do anything else to them.
 261 //
 262 // Currently, we only support root region scanning once (at the start
 263 // of the marking cycle) and the root regions are all the survivor
 264 // regions populated during the initial-mark pause.
 265 class G1CMRootRegions VALUE_OBJ_CLASS_SPEC {
 266 private:
 267   const G1SurvivorRegions* _survivors;
 268   G1ConcurrentMark*        _cm;
 269 


 302   // have been claimed.
 303   void scan_finished();
 304 
 305   // If CM threads are still scanning root regions, wait until they
 306   // are done. Return true if we had to wait, false otherwise.
 307   bool wait_until_scan_finished();
 308 };
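
To make the contract above concrete, a hedged sketch of a caller; the actual call site is in the evacuation pause code, not in this header, and pause_prologue is a made-up name:

    // An evacuation pause must not copy objects out of a root region that
    // has not been scanned yet, so it first blocks on the scan:
    void pause_prologue(G1ConcurrentMark* cm) {
      if (cm->root_regions()->wait_until_scan_finished()) {
        // We had to wait: concurrent mark threads were still claiming and
        // scanning the survivor regions recorded at the initial-mark pause.
      }
      // Safe to evacuate now.
    }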
 309 
 310 class ConcurrentMarkThread;
 311 
 312 class G1ConcurrentMark: public CHeapObj<mtGC> {
 313   friend class ConcurrentMarkThread;
 314   friend class G1ParNoteEndTask;
 315   friend class G1VerifyLiveDataClosure;
 316   friend class G1CMRefProcTaskProxy;
 317   friend class G1CMRefProcTaskExecutor;
 318   friend class G1CMKeepAliveAndDrainClosure;
 319   friend class G1CMDrainMarkingStackClosure;
 320   friend class G1CMBitMapClosure;
 321   friend class G1CMConcurrentMarkingTask;
 322   friend class G1CMRemarkTask;
 323   friend class G1CMTask;
 324 
 325 protected:
 326   ConcurrentMarkThread* _cmThread;   // The thread doing the work
 327   G1CollectedHeap*      _g1h;        // The heap
 328   uint                  _parallel_marking_threads; // The number of marking
 329                                                    // threads we're using
 330   uint                  _max_parallel_marking_threads; // Max number of marking
 331                                                        // threads we'll ever use
 332   double                _sleep_factor; // How much we have to sleep, with
 333                                        // respect to the work we just did, to
 334                                        // meet the marking overhead goal
 335   double                _marking_task_overhead; // Marking target overhead for
 336                                                 // a single task
 337 
 338   FreeRegionList        _cleanup_list;
 339 
 340   // Concurrent marking support structures
 341   G1CMBitMap              _markBitMap1;


 502   // Methods to enter the two overflow sync barriers
 503   void enter_first_sync_barrier(uint worker_id);
 504   void enter_second_sync_barrier(uint worker_id);
 505 
 506   // Card index of the bottom of the G1 heap. Used for biasing indices into
 507   // the card bitmaps.
 508   intptr_t _heap_bottom_card_num;
 509 
 510   // Set to true when initialization is complete
 511   bool _completed_initialization;
 512 
 513   // If end_timer is true, the GC timer is ended after ending the concurrent phase.
 514   void register_concurrent_phase_end_common(bool end_timer);
 515 
 516   // Clear the given bitmap in parallel using the given WorkGang. If may_yield is
 517   // true, periodically insert checks to see if this method should exit prematurely.
 518   void clear_bitmap(G1CMBitMap* bitmap, WorkGang* workers, bool may_yield);
 519 public:
 520   // Manipulation of the global mark stack.
 521   // The push and pop operations are used by tasks for transfers
 522   // between task-local queues and the global mark stack.
 523   bool mark_stack_push(oop* arr) {
 524     _global_mark_stack.par_push_chunk(arr);
 525     if (_global_mark_stack.is_out_of_memory()) {
 526       set_has_overflown();
 527       return false;
 528     }
 529     return true;
 530   }
 531   bool mark_stack_pop(oop* arr) {
 532     return _global_mark_stack.par_pop_chunk(arr);
 533   }
 534   size_t mark_stack_size()                { return _global_mark_stack.size(); }
 535   size_t partial_mark_stack_size_target() { return _global_mark_stack.capacity()/3; }
 536   bool mark_stack_overflow()              { return _global_mark_stack.is_out_of_memory(); }
 537   bool mark_stack_empty()                 { return _global_mark_stack.is_empty(); }
 538 
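The NULL-termination contract of par_push_chunk shows up at call sites. A hedged sketch of the task-side transfer (pop_local is a made-up helper standing in for the task-local queue's pop; the real logic lives in move_entries_to_global_stack in the .cpp file):

    bool pop_local(oop* obj); // illustrative: pops one entry off the local queue

    void move_entries_sketch(G1ConcurrentMark* cm) {
      oop buffer[G1CMMarkStack::OopsPerChunk];
      size_t n = 0;
      oop obj;
      while (n < G1CMMarkStack::OopsPerChunk && pop_local(&obj)) {
        buffer[n++] = obj;
      }
      if (n < G1CMMarkStack::OopsPerChunk) {
        buffer[n] = NULL; // terminator for a partially filled chunk
      }
      if (n > 0 && !cm->mark_stack_push(buffer)) {
        // The global stack ran out of memory: has_overflown is now set and
        // marking will restart with a larger stack.
      }
    }
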
 539   G1CMRootRegions* root_regions() { return &_root_regions; }
 540 
 541   bool concurrent_marking_in_progress() {
 542     return _concurrent_marking_in_progress;
 543   }
 544   void set_concurrent_marking_in_progress() {
 545     _concurrent_marking_in_progress = true;
 546   }
 547   void clear_concurrent_marking_in_progress() {
 548     _concurrent_marking_in_progress = false;
 549   }
 550 
 551   void concurrent_cycle_start();
 552   void concurrent_cycle_end();
 553 
 554   void update_accum_task_vtime(int i, double vtime) {
 555     _accum_task_vtime[i] += vtime;
 556   }


 623 
 624   // Do concurrent phase of marking, to a tentative transitive closure.
 625   void mark_from_roots();
 626 
 627   void checkpointRootsFinal(bool clear_all_soft_refs);
 628   void checkpointRootsFinalWork();
 629   void cleanup();
 630   void complete_cleanup();
 631 
 632   // Mark in the previous bitmap.  NB: this is usually read-only, so use
 633   // this carefully!
 634   inline void markPrev(oop p);
 635 
 636   // Clears marks for all objects in the given range, for the prev or
 637   // next bitmaps.  NB: the previous bitmap is usually
 638   // read-only, so use this carefully!
 639   void clearRangePrevBitmap(MemRegion mr);
 640 
 641   // Notify data structures that a GC has started.
 642   void note_start_of_gc() {
 643   }
 644 
 645   // Notify data structures that a GC is finished.
 646   void note_end_of_gc() {
 647   }
 648 
 649   // Verify that there are no CSet oops on the stacks (taskqueues /
 650   // global mark stack) and fingers (global / per-task).
 651   // If marking is not in progress, it's a no-op.
 652   void verify_no_cset_oops() PRODUCT_RETURN;
 653 
 654   inline bool isPrevMarked(oop p) const;
 655 
 656   inline bool do_yield_check();
 657 
 658   // Abandon current marking iteration due to a Full GC.
 659   void abort();
 660 
 661   bool has_aborted()      { return _has_aborted; }
 662 
 663   void print_summary_info();
 664 
 665   void print_worker_threads_on(outputStream* st) const;
 666   void threads_do(ThreadClosure* tc) const;


 690   // Aggregates the per-card liveness data based on the current marking. Also sets
 691   // the amount of marked bytes for each region.
 692   void create_live_data();
 693 
 694   void finalize_live_data();
 695 
 696   void verify_live_data();
 697 };
 698 
 699 // A class representing a marking task.
 700 class G1CMTask : public TerminatorTerminator {
 701 private:
 702   enum PrivateConstants {
 703     // The regular clock call is called once the number of scanned words reaches
 704     // this limit
 705     words_scanned_period          = 12*1024,
 706     // The regular clock call is called once the number of visited
 707     // references reaches this limit
 708     refs_reached_period           = 384,
 709     // Initial value for the hash seed, used in the work stealing code
 710     init_hash_seed                = 17
 711   };
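
These constants bound how much work a task does between checks of its time budget. A hedged sketch of the pattern, written against the declarations in this header (_words_scanned and regular_clock_call() exist in the full class; access control is ignored for clarity):

    // Hot path: just bump a counter. Only once per words_scanned_period do we
    // pay for the clock call that re-checks elapsed time, yields and aborts.
    void count_scanned_words_sketch(G1CMTask* task, size_t words) {
      task->_words_scanned += words;
      if (task->_words_scanned >= words_scanned_period) {
        task->_words_scanned = 0;
        task->regular_clock_call();
      }
    }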
 712 
 713   uint                        _worker_id;
 714   G1CollectedHeap*            _g1h;
 715   G1ConcurrentMark*           _cm;
 716   G1CMBitMap*                 _nextMarkBitMap;
 717   // the task queue of this task
 718   G1CMTaskQueue*              _task_queue;
 719 private:
 720   // the task queue set---needed for stealing
 721   G1CMTaskQueueSet*           _task_queues;
 722   // indicates whether the task has been claimed---this is only for
 723   // debugging purposes
 724   bool                        _claimed;
 725 
 726   // number of calls to this task
 727   int                         _calls;
 728 
 729   // when the virtual timer reaches this time, the marking step should
 730   // exit


 875 
 876   // Increment the number of references this task has visited.
 877   void increment_refs_reached() { ++_refs_reached; }
 878 
 879   // Grey the object by marking it.  If not already marked, push it on
 880   // the local queue if below the finger.
 881   // obj is below its region's NTAMS.
 882   inline void make_reference_grey(oop obj);
 883 
 884   // Grey the object (by calling make_reference_grey) if required,
 885   // e.g. obj is below its containing region's NTAMS.
 886   // Precondition: obj is a valid heap object.
 887   inline void deal_with_reference(oop obj);
 888 
 889   // It scans an object and visits its children.
 890   inline void scan_object(oop obj);
 891 
 892   // It pushes an object on the local queue.
 893   inline void push(oop obj);
 894 
 895   // Move entries to the global stack.
 896   void move_entries_to_global_stack();
 897   // Move entries from the global stack, returning true if any entries were moved.
 898   bool get_entries_from_global_stack();
 899 
 900   // It pops and scans objects from the local queue. If partially is
 901   // true, then it stops when the queue size drops below a given limit. If
 902   // partially is false, then it stops when the queue is empty.
 903   void drain_local_queue(bool partially);
 904   // It moves entries from the global stack to the local queue and
 905   // drains the local queue. If partially is true, then it stops when
 906   // both the global stack and the local queue reach a given size. If
 907   // partially is false, it tries to empty them totally.
 908   void drain_global_stack(bool partially);
 909   // It keeps picking SATB buffers and processing them until no SATB
 910   // buffers are available.
 911   void drain_satb_buffers();
 912 
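A hedged sketch of how the partial variant of drain_global_stack could combine the declarations above with partial_mark_stack_size_target(); the real logic is in the .cpp file and access control is ignored here:

    void drain_global_stack_sketch(G1CMTask* task, G1ConcurrentMark* cm,
                                   bool partially) {
      // Partial mode leaves roughly a third of the stack's capacity to other
      // tasks; full mode drains it completely.
      size_t target = partially ? cm->partial_mark_stack_size_target() : 0;
      while (!task->has_aborted() && cm->mark_stack_size() > target) {
        if (!task->get_entries_from_global_stack()) {
          break; // global stack went empty under us
        }
        task->drain_local_queue(partially);
      }
    }
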
 913   // moves the local finger to a new location
 914   inline void move_finger_to(HeapWord* new_finger) {
 915     assert(new_finger >= _finger && new_finger < _region_limit, "invariant");
 916     _finger = new_finger;
 917   }
 918 