src/share/vm/gc/g1/g1ConcurrentMark.hpp

rev 11973 : imported patch 8159422-high-mark-stack-contention
rev 11974 : imported patch 8159422-mikael-review
rev 11976 : imported patch 8159422-kim-review
rev 11977 : imported patch 8159422-kim-review2
rev 11978 : imported patch 8159422-kim-review3

 132     return mark_distance();
 133   }
 134 
 135   G1CMBitMap() : G1CMBitMapRO(LogMinObjAlignment), _listener() { _listener.set_bitmap(this); }
 136 
 137   // Initializes the underlying BitMap to cover the given area.
 138   void initialize(MemRegion heap, G1RegionToSpaceMapper* storage);
 139 
 140   // Write marks.
 141   inline void mark(HeapWord* addr);
 142   inline void clear(HeapWord* addr);
 143   inline bool parMark(HeapWord* addr);
 144 
 145   void clear_range(MemRegion mr);
 146 };
 147 
 148 // Represents the overflow mark stack used by concurrent marking.
 149 //
 150 // Stores oops in a huge buffer in virtual memory that is always fully committed.
 151 // Resizing may only happen during a STW pause when the stack is empty.
 152 class G1CMMarkStack VALUE_OBJ_CLASS_SPEC {
 153   ReservedSpace _reserved_space; // Space currently reserved for the mark stack.
 154 
 155   oop* _base;                    // Bottom address of allocated memory area.
 156   size_t _capacity;              // Maximum number of elements.
 157   size_t _index;                 // One more than last occupied index.
 158 
 159   size_t _saved_index;           // Value of _index saved at start of GC to detect mark stack modifications during that time.
 160 
 161   bool  _overflow;
 162   bool  _should_expand;
 163 
 164   // Resizes the mark stack to the given new capacity. Releases any previous
 165   // memory if successful.
 166   bool resize(size_t new_capacity);
 167 
 168   bool stack_modified() const { return _index != _saved_index; }
 169  public:
 170   G1CMMarkStack();
 171   ~G1CMMarkStack();
 172 
 173   bool allocate(size_t capacity);
 174 
 175   // Pushes the first "n" elements of the given buffer on the stack.
 176   void par_push_arr(oop* buffer, size_t n);
 177 
 178   // Moves up to max elements from the stack into the given buffer, setting
 179   // *n to the number of elements moved. Returns true if the buffer contains
 180   // at least one element, false if the stack was empty.
 181   bool par_pop_arr(oop* buffer, size_t max, size_t* n);
 182 
 183   bool is_empty() const { return _index == 0; }
 184   size_t capacity() const  { return _capacity; }
 185 
 186   bool overflow() const { return _overflow; }
 187   void clear_overflow() { _overflow = false; }
 188 
 189   bool should_expand() const { return _should_expand; }
 190   void set_should_expand(bool value) { _should_expand = value; }
 191 
 192   // Expand the stack, typically in response to an overflow condition
 193   void expand();
 194 
 195   size_t size() const { return _index; }
 196 
 197   void set_empty() { _index = 0; clear_overflow(); }
 198 
 199   // Record the current index.
 200   void note_start_of_gc();
 201 
 202   // Make sure that we have not added any entries to the stack during GC.
 203   void note_end_of_gc();
 204 
 205   // Apply fn to each oop in the mark stack, up to the bound recorded
 206   // via one of the above "note" functions.  The mark stack must not
 207   // be modified while iterating.
 208   template<typename Fn> void iterate(Fn fn);
 209 };
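For orientation, a minimal usage sketch of the array-based interface above. The scratch-buffer size, the variable names, and the surrounding task are assumptions for illustration, not declarations from this file; stack stands for a G1CMMarkStack*.

    oop    buffer[1024];   // hypothetical task-local scratch space
    size_t n = 0;

    // Flush locally gathered entries onto the global stack. On overflow the
    // stack records it, observable afterwards via overflow().
    stack->par_push_arr(buffer, n);

    // Refill the buffer; returns false if the global stack was empty.
    if (stack->par_pop_arr(buffer, 1024, &n)) {
      // process buffer[0 .. n-1]
    }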
 210 
 211 // Root Regions are regions that are not empty at the beginning of a
 212 // marking cycle and which we might collect during an evacuation pause
 213 // while the cycle is active. Given that, during evacuation pauses, we
 214 // do not copy objects that are explicitly marked, what we have to do
 215 // for the root regions is to scan them and mark all objects reachable
 216 // from them. According to the SATB assumptions, we only need to visit
 217 // each object once during marking. So, as long as we finish this scan
 218 // before the next evacuation pause, we can copy the objects from the
 219 // root regions without having to mark them or do anything else to them.
 220 //
 221 // Currently, we only support root region scanning once (at the start
 222 // of the marking cycle) and the root regions are all the survivor
 223 // regions populated during the initial-mark pause.
 224 class G1CMRootRegions VALUE_OBJ_CLASS_SPEC {
 225 private:
 226   const G1SurvivorRegions* _survivors;
 227   G1ConcurrentMark*        _cm;
 228 

 261   // have been claimed.
 262   void scan_finished();
 263 
 264   // If CM threads are still scanning root regions, wait until they
 265   // are done. Return true if we had to wait, false otherwise.
 266   bool wait_until_scan_finished();
 267 };
 268 
 269 class ConcurrentMarkThread;
 270 
 271 class G1ConcurrentMark: public CHeapObj<mtGC> {
 272   friend class ConcurrentMarkThread;
 273   friend class G1ParNoteEndTask;
 274   friend class G1VerifyLiveDataClosure;
 275   friend class G1CMRefProcTaskProxy;
 276   friend class G1CMRefProcTaskExecutor;
 277   friend class G1CMKeepAliveAndDrainClosure;
 278   friend class G1CMDrainMarkingStackClosure;
 279   friend class G1CMBitMapClosure;
 280   friend class G1CMConcurrentMarkingTask;
 281   friend class G1CMMarkStack;
 282   friend class G1CMRemarkTask;
 283   friend class G1CMTask;
 284 
 285 protected:
 286   ConcurrentMarkThread* _cmThread;   // The thread doing the work
 287   G1CollectedHeap*      _g1h;        // The heap
 288   uint                  _parallel_marking_threads; // The number of marking
 289                                                    // threads we're using
 290   uint                  _max_parallel_marking_threads; // Max number of marking
 291                                                        // threads we'll ever use
 292   double                _sleep_factor; // How much we have to sleep, with
 293                                        // respect to the work we just did, to
 294                                        // meet the marking overhead goal
 295   double                _marking_task_overhead; // Marking target overhead for
 296                                                 // a single task
 297 
 298   FreeRegionList        _cleanup_list;
 299 
 300   // Concurrent marking support structures
 301   G1CMBitMap              _markBitMap1;

 462   // Methods to enter the two overflow sync barriers
 463   void enter_first_sync_barrier(uint worker_id);
 464   void enter_second_sync_barrier(uint worker_id);
 465 
 466   // Card index of the bottom of the G1 heap. Used for biasing indices into
 467   // the card bitmaps.
 468   intptr_t _heap_bottom_card_num;
 469 
 470   // Set to true when initialization is complete
 471   bool _completed_initialization;
 472 
 473   // If end_timer is true, ends the GC timer after ending the concurrent phase.
 474   void register_concurrent_phase_end_common(bool end_timer);
 475 
 476   // Clear the given bitmap in parallel using the given WorkGang. If may_yield is
 477   // true, periodically insert checks to see if this method should exit prematurely.
 478   void clear_bitmap(G1CMBitMap* bitmap, WorkGang* workers, bool may_yield);
 479 public:
 480   // Manipulation of the global mark stack.
 481   // The push and pop operations are used by tasks for transfers
 482   // between task-local queues and the global mark stack, and use
 483   // locking for concurrency safety.
 484   bool mark_stack_push(oop* arr, size_t n) {
 485     _global_mark_stack.par_push_arr(arr, n);
 486     if (_global_mark_stack.overflow()) {
 487       set_has_overflown();
 488       return false;
 489     }
 490     return true;
 491   }
 492   void mark_stack_pop(oop* arr, size_t max, size_t* n) {
 493     _global_mark_stack.par_pop_arr(arr, max, n);
 494   }
 495   size_t mark_stack_size()                { return _global_mark_stack.size(); }
 496   size_t partial_mark_stack_size_target() { return _global_mark_stack.capacity()/3; }
 497   bool mark_stack_overflow()              { return _global_mark_stack.overflow(); }
 498   bool mark_stack_empty()                 { return _global_mark_stack.is_empty(); }
 499 
 500   G1CMRootRegions* root_regions() { return &_root_regions; }
 501 
 502   bool concurrent_marking_in_progress() {
 503     return _concurrent_marking_in_progress;
 504   }
 505   void set_concurrent_marking_in_progress() {
 506     _concurrent_marking_in_progress = true;
 507   }
 508   void clear_concurrent_marking_in_progress() {
 509     _concurrent_marking_in_progress = false;
 510   }
 511 
 512   void concurrent_cycle_start();
 513   void concurrent_cycle_end();
 514 
 515   void update_accum_task_vtime(int i, double vtime) {
 516     _accum_task_vtime[i] += vtime;
 517   }

 582   // Scan a single root region and mark everything reachable from it.
 583   void scanRootRegion(HeapRegion* hr);
 584 
 585   // Do concurrent phase of marking, to a tentative transitive closure.
 586   void mark_from_roots();
 587 
 588   void checkpointRootsFinal(bool clear_all_soft_refs);
 589   void checkpointRootsFinalWork();
 590   void cleanup();
 591   void complete_cleanup();
 592 
 593   // Mark in the previous bitmap.  NB: this is usually read-only, so use
 594   // this carefully!
 595   inline void markPrev(oop p);
 596 
 597   // Clears marks for all objects in the given range, for the prev or
 598   // next bitmaps.  NB: the previous bitmap is usually
 599   // read-only, so use this carefully!
 600   void clearRangePrevBitmap(MemRegion mr);
 601 
 602   // Notify data structures that a GC has started.
 603   void note_start_of_gc() {
 604     _global_mark_stack.note_start_of_gc();
 605   }
 606 
 607   // Notify data structures that a GC is finished.
 608   void note_end_of_gc() {
 609     _global_mark_stack.note_end_of_gc();
 610   }
 611 
 612   // Verify that there are no CSet oops on the stacks (taskqueues /
 613   // global mark stack) and fingers (global / per-task).
 614   // If marking is not in progress, it's a no-op.
 615   void verify_no_cset_oops() PRODUCT_RETURN;
 616 
 617   inline bool isPrevMarked(oop p) const;
 618 
 619   inline bool do_yield_check();
 620 
 621   // Abandon current marking iteration due to a Full GC.
 622   void abort();
 623 
 624   bool has_aborted()      { return _has_aborted; }
 625 
 626   void print_summary_info();
 627 
 628   void print_worker_threads_on(outputStream* st) const;
 629   void threads_do(ThreadClosure* tc) const;
 630 
 631   void print_on_error(outputStream* st) const;

 653   // Aggregates the per-card liveness data based on the current marking. Also sets
 654   // the amount of marked bytes for each region.
 655   void create_live_data();
 656 
 657   void finalize_live_data();
 658 
 659   void verify_live_data();
 660 };
 661 
 662 // A class representing a marking task.
 663 class G1CMTask : public TerminatorTerminator {
 664 private:
 665   enum PrivateConstants {
 666     // The regular clock call is made once the number of scanned words
 667     // reaches this limit
 668     words_scanned_period          = 12*1024,
 669     // The regular clock call is called once the number of visited
 670     // references reaches this limit
 671     refs_reached_period           = 384,
 672     // Initial value for the hash seed, used in the work stealing code
 673     init_hash_seed                = 17,
 674     // How many entries will be transferred between global stack and
 675     // local queues at once.
 676     global_stack_transfer_size    = 1024
 677   };
 678 
 679   uint                        _worker_id;
 680   G1CollectedHeap*            _g1h;
 681   G1ConcurrentMark*           _cm;
 682   G1CMBitMap*                 _nextMarkBitMap;
 683   // the task queue of this task
 684   G1CMTaskQueue*              _task_queue;
 685 private:
 686   // the task queue set---needed for stealing
 687   G1CMTaskQueueSet*           _task_queues;
 688   // indicates whether the task has been claimed---this is only for
 689   // debugging purposes
 690   bool                        _claimed;
 691 
 692   // number of calls to this task
 693   int                         _calls;
 694 
 695   // when the virtual timer reaches this time, the marking step should
 696   // exit

 841 
 842   // Increment the number of references this task has visited.
 843   void increment_refs_reached() { ++_refs_reached; }
 844 
 845   // Grey the object by marking it.  If not already marked, push it on
 846   // the local queue if below the finger.
 847   // obj is below its region's NTAMS.
 848   inline void make_reference_grey(oop obj);
 849 
 850   // Grey the object (by calling make_reference_grey) if required,
 851   // e.g. obj is below its containing region's NTAMS.
 852   // Precondition: obj is a valid heap object.
 853   inline void deal_with_reference(oop obj);
 854 
 855   // It scans an object and visits its children.
 856   inline void scan_object(oop obj);
 857 
 858   // It pushes an object on the local queue.
 859   inline void push(oop obj);
 860 
 861   // These two move entries to/from the global stack.
 862   void move_entries_to_global_stack();
 863   void get_entries_from_global_stack();
 864 
 865   // It pops and scans objects from the local queue. If partially is
 866   // true, then it stops when the queue size reaches a given limit. If
 867   // partially is false, then it stops when the queue is empty.
 868   void drain_local_queue(bool partially);
 869   // It moves entries from the global stack to the local queue and
 870   // drains the local queue. If partially is true, then it stops when
 871   // both the global stack and the local queue reach a given size. If
 872   // partially is false, it tries to empty them totally.
 873   void drain_global_stack(bool partially);
 874   // It keeps picking SATB buffers and processing them until no SATB
 875   // buffers are available.
 876   void drain_satb_buffers();
 877 
 878   // moves the local finger to a new location
 879   inline void move_finger_to(HeapWord* new_finger) {
 880     assert(new_finger >= _finger && new_finger < _region_limit, "invariant");
 881     _finger = new_finger;
 882   }
 883 

 132     return mark_distance();
 133   }
 134 
 135   G1CMBitMap() : G1CMBitMapRO(LogMinObjAlignment), _listener() { _listener.set_bitmap(this); }
 136 
 137   // Initializes the underlying BitMap to cover the given area.
 138   void initialize(MemRegion heap, G1RegionToSpaceMapper* storage);
 139 
 140   // Write marks.
 141   inline void mark(HeapWord* addr);
 142   inline void clear(HeapWord* addr);
 143   inline bool parMark(HeapWord* addr);
 144 
 145   void clear_range(MemRegion mr);
 146 };
 147 
 148 // Represents the overflow mark stack used by concurrent marking.
 149 //
 150 // Stores oops in a huge buffer in virtual memory that is always fully committed.
 151 // Resizing may only happen during a STW pause when the stack is empty.
 152 //
 153 // Memory is allocated on a "chunk" basis, i.e. a set of oops. For this, the mark
 154 // stack memory is split into evenly sized chunks of oops. Users can only
 155 // add or remove entries on that basis.
 156   // Chunks are filled in increasing address order. Chunks that are not
 157   // completely filled are terminated by a NULL element.
 158 //
 159 // Every chunk has a header containing a single pointer element used for memory
 160   // management. This wastes some space, but is negligible (< 0.1% with current sizing).
 161 //
 162 // Memory management is done using a mix of tracking a high water-mark indicating
 163 // that all chunks at a lower address are valid chunks, and a singly linked free
 164 // list connecting all empty chunks.
 165 class G1CMMarkStack VALUE_OBJ_CLASS_SPEC {
 166 public:
 167   // Number of oops that can fit in a single chunk.
 168   static const size_t OopsPerChunk = 1024 - 1 /* One reference for the next pointer */;
 169 private:
 170   struct OopChunk {
 171     OopChunk* next;
 172     oop data[OopsPerChunk];
 173   };
 174 
 175   size_t _max_chunk_capacity;    // Maximum number of OopChunk elements on the stack.
 176 
 177   OopChunk* _base;               // Bottom address of allocated memory area.
 178   size_t _chunk_capacity;        // Current maximum number of OopChunk elements.
 179 
 180   char _pad0[DEFAULT_CACHE_LINE_SIZE];
 181   OopChunk* volatile _free_list;  // Linked list of free chunks that can be allocated by users.
 182   char _pad1[DEFAULT_CACHE_LINE_SIZE - sizeof(OopChunk*)];
 183   OopChunk* volatile _chunk_list; // List of chunks currently containing data.
 184   volatile size_t _chunks_in_chunk_list;
 185   char _pad2[DEFAULT_CACHE_LINE_SIZE - sizeof(OopChunk*) - sizeof(size_t)];
 186  
 187   volatile size_t _hwm;          // High water mark within the reserved space.
 188   char _pad4[DEFAULT_CACHE_LINE_SIZE - sizeof(size_t)];
 189  
 190   // Allocate a new chunk from the reserved memory, using the high water mark. Returns
 191   // NULL if out of memory.
 192   OopChunk* allocate_new_chunk();
 193 
 194   volatile bool _out_of_memory;
 195 
 196   // Atomically add the given chunk to the list.
 197   void add_chunk_to_list(OopChunk* volatile* list, OopChunk* elem);
 198   // Atomically remove and return a chunk from the given list. Returns NULL if the
 199   // list is empty.
 200   OopChunk* remove_chunk_from_list(OopChunk* volatile* list); 
 201 
 202   void add_chunk_to_chunk_list(OopChunk* elem);
 203   void add_chunk_to_free_list(OopChunk* elem);
 204 
 205   OopChunk* remove_chunk_from_chunk_list();
 206   OopChunk* remove_chunk_from_free_list();
 207 
 208   bool  _should_expand;
 209 
 210   // Resizes the mark stack to the given new capacity. Releases any previous
 211   // memory if successful.
 212   bool resize(size_t new_capacity);
 213 
 214  public:
 215   G1CMMarkStack();
 216   ~G1CMMarkStack();
 217 
 218   // Alignment and minimum capacity of this mark stack in number of oops.
 219   static size_t capacity_alignment();
 220 
 221   // Allocate and initialize the mark stack with the given number of oops.
 222   bool initialize(size_t initial_capacity, size_t max_capacity);
 223 
 224   // Pushes the given buffer containing at most OopsPerChunk elements on the mark
 225   // stack. If fewer than OopsPerChunk elements are to be pushed, the array must
 226   // be terminated with a NULL.
 227   // Returns whether the buffer contents were successfully pushed to the global mark
 228   // stack.
 229   bool par_push_chunk(oop* buffer);
 230 
 231   // Pops a chunk from this mark stack, copying its contents into the given
 232   // buffer. This chunk may contain up to OopsPerChunk elements. If there are
 233   // fewer, the last element in the array is a NULL pointer.
 234   bool par_pop_chunk(oop* buffer);
 235 
 236   // Return whether the chunk list is empty. Racy due to unsynchronized access to 
 237   // _chunk_list.
 238   bool is_empty() const { return _chunk_list == NULL; }
 239 
 240   size_t capacity() const  { return _chunk_capacity; }
 241 
 242   bool is_out_of_memory() const { return _out_of_memory; }
 243   void clear_out_of_memory() { _out_of_memory = false; }
 244 
 245   bool should_expand() const { return _should_expand; }
 246   void set_should_expand(bool value) { _should_expand = value; }
 247 
 248   // Expand the stack, typically in response to an overflow condition
 249   void expand();
 250 
 251   // Return the approximate number of oops on this mark stack. Racy due to
 252   // unsynchronized access to _chunks_in_chunk_list.
 253   size_t size() const { return _chunks_in_chunk_list * OopsPerChunk; }
 254  
 255   void set_empty();
 256 
 257   // Apply Fn to every oop on the mark stack. The mark stack must not
 258   // be modified while iterating.
 259   template<typename Fn> void iterate(Fn fn) const PRODUCT_RETURN;
 260 };
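The high-water-mark plus free-list scheme described in the class comment can be illustrated with a self-contained sketch in plain C++11. Everything below (Chunk, ChunkAllocator, the std::atomic synchronization) is an assumption made for illustration and is deliberately simplified (ABA hazards on the free list are ignored, for instance); it is not the HotSpot implementation.

    #include <atomic>
    #include <cstddef>

    struct Chunk {
      Chunk* next;        // header pointer used to link free/used chunks
      void*  data[1023];  // payload slots, mirroring OopsPerChunk = 1024 - 1
    };

    class ChunkAllocator {
      Chunk*              _base;       // start of the reserved chunk array
      size_t              _capacity;   // total number of chunks reserved
      std::atomic<size_t> _hwm;        // first chunk index never handed out
      std::atomic<Chunk*> _free_list;  // recycled chunks, LIFO

    public:
      ChunkAllocator(Chunk* base, size_t capacity)
        : _base(base), _capacity(capacity), _hwm(0), _free_list(nullptr) {}

      // Prefer a recycled chunk from the free list; otherwise claim a fresh
      // one by bumping the high water mark. Every chunk below _hwm is valid.
      Chunk* allocate() {
        Chunk* c = _free_list.load();
        while (c != nullptr && !_free_list.compare_exchange_weak(c, c->next)) {
        }
        if (c != nullptr) {
          return c;
        }
        size_t idx = _hwm.fetch_add(1);
        return idx < _capacity ? &_base[idx] : nullptr;  // nullptr: out of memory
      }

      // Return a drained chunk to the free list.
      void release(Chunk* c) {
        c->next = _free_list.load();
        while (!_free_list.compare_exchange_weak(c->next, c)) {
        }
      }
    };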
 261 
 262 // Root Regions are regions that are not empty at the beginning of a
 263 // marking cycle and which we might collect during an evacuation pause
 264 // while the cycle is active. Given that, during evacuation pauses, we
 265 // do not copy objects that are explicitly marked, what we have to do
 266 // for the root regions is to scan them and mark all objects reachable
 267 // from them. According to the SATB assumptions, we only need to visit
 268 // each object once during marking. So, as long as we finish this scan
 269 // before the next evacuation pause, we can copy the objects from the
 270 // root regions without having to mark them or do anything else to them.
 271 //
 272 // Currently, we only support root region scanning once (at the start
 273 // of the marking cycle) and the root regions are all the survivor
 274 // regions populated during the initial-mark pause.
 275 class G1CMRootRegions VALUE_OBJ_CLASS_SPEC {
 276 private:
 277   const G1SurvivorRegions* _survivors;
 278   G1ConcurrentMark*        _cm;
 279 

 312   // have been claimed.
 313   void scan_finished();
 314 
 315   // If CM threads are still scanning root regions, wait until they
 316   // are done. Return true if we had to wait, false otherwise.
 317   bool wait_until_scan_finished();
 318 };
 319 
 320 class ConcurrentMarkThread;
 321 
 322 class G1ConcurrentMark: public CHeapObj<mtGC> {
 323   friend class ConcurrentMarkThread;
 324   friend class G1ParNoteEndTask;
 325   friend class G1VerifyLiveDataClosure;
 326   friend class G1CMRefProcTaskProxy;
 327   friend class G1CMRefProcTaskExecutor;
 328   friend class G1CMKeepAliveAndDrainClosure;
 329   friend class G1CMDrainMarkingStackClosure;
 330   friend class G1CMBitMapClosure;
 331   friend class G1CMConcurrentMarkingTask;
 332   friend class G1CMRemarkTask;
 333   friend class G1CMTask;
 334 
 335 protected:
 336   ConcurrentMarkThread* _cmThread;   // The thread doing the work
 337   G1CollectedHeap*      _g1h;        // The heap
 338   uint                  _parallel_marking_threads; // The number of marking
 339                                                    // threads we're using
 340   uint                  _max_parallel_marking_threads; // Max number of marking
 341                                                        // threads we'll ever use
 342   double                _sleep_factor; // How much we have to sleep, with
 343                                        // respect to the work we just did, to
 344                                        // meet the marking overhead goal
 345   double                _marking_task_overhead; // Marking target overhead for
 346                                                 // a single task
 347 
 348   FreeRegionList        _cleanup_list;
 349 
 350   // Concurrent marking support structures
 351   G1CMBitMap              _markBitMap1;

 512   // Methods to enter the two overflow sync barriers
 513   void enter_first_sync_barrier(uint worker_id);
 514   void enter_second_sync_barrier(uint worker_id);
 515 
 516   // Card index of the bottom of the G1 heap. Used for biasing indices into
 517   // the card bitmaps.
 518   intptr_t _heap_bottom_card_num;
 519 
 520   // Set to true when initialization is complete
 521   bool _completed_initialization;
 522 
 523   // If end_timer is true, ends the GC timer after ending the concurrent phase.
 524   void register_concurrent_phase_end_common(bool end_timer);
 525 
 526   // Clear the given bitmap in parallel using the given WorkGang. If may_yield is
 527   // true, periodically insert checks to see if this method should exit prematurely.
 528   void clear_bitmap(G1CMBitMap* bitmap, WorkGang* workers, bool may_yield);
 529 public:
 530   // Manipulation of the global mark stack.
 531   // The push and pop operations are used by tasks for transfers
 532   // between task-local queues and the global mark stack.
 533   bool mark_stack_push(oop* arr) {
 534     if (!_global_mark_stack.par_push_chunk(arr)) {
 535       set_has_overflown();
 536       return false;
 537     }
 538     return true;
 539   }
 540   bool mark_stack_pop(oop* arr) {
 541     return _global_mark_stack.par_pop_chunk(arr);
 542   }
 543   size_t mark_stack_size()                { return _global_mark_stack.size(); }
 544   size_t partial_mark_stack_size_target() { return _global_mark_stack.capacity()/3; }
 545   bool mark_stack_overflow()              { return _global_mark_stack.is_out_of_memory(); }
 546   bool mark_stack_empty()                 { return _global_mark_stack.is_empty(); }
 547 
 548   G1CMRootRegions* root_regions() { return &_root_regions; }
 549 
 550   bool concurrent_marking_in_progress() {
 551     return _concurrent_marking_in_progress;
 552   }
 553   void set_concurrent_marking_in_progress() {
 554     _concurrent_marking_in_progress = true;
 555   }
 556   void clear_concurrent_marking_in_progress() {
 557     _concurrent_marking_in_progress = false;
 558   }
 559 
 560   void concurrent_cycle_start();
 561   void concurrent_cycle_end();
 562 
 563   void update_accum_task_vtime(int i, double vtime) {
 564     _accum_task_vtime[i] += vtime;
 565   }

 630   // Scan a single root region and mark everything reachable from it.
 631   void scanRootRegion(HeapRegion* hr);
 632 
 633   // Do concurrent phase of marking, to a tentative transitive closure.
 634   void mark_from_roots();
 635 
 636   void checkpointRootsFinal(bool clear_all_soft_refs);
 637   void checkpointRootsFinalWork();
 638   void cleanup();
 639   void complete_cleanup();
 640 
 641   // Mark in the previous bitmap.  NB: this is usually read-only, so use
 642   // this carefully!
 643   inline void markPrev(oop p);
 644 
 645   // Clears marks for all objects in the given range, for the prev or
 646   // next bitmaps.  NB: the previous bitmap is usually
 647   // read-only, so use this carefully!
 648   void clearRangePrevBitmap(MemRegion mr);
 649 
 650   // Verify that there are no CSet oops on the stacks (taskqueues /
 651   // global mark stack) and fingers (global / per-task).
 652   // If marking is not in progress, it's a no-op.
 653   void verify_no_cset_oops() PRODUCT_RETURN;
 654 
 655   inline bool isPrevMarked(oop p) const;
 656 
 657   inline bool do_yield_check();
 658 
 659   // Abandon current marking iteration due to a Full GC.
 660   void abort();
 661 
 662   bool has_aborted()      { return _has_aborted; }
 663 
 664   void print_summary_info();
 665 
 666   void print_worker_threads_on(outputStream* st) const;
 667   void threads_do(ThreadClosure* tc) const;
 668 
 669   void print_on_error(outputStream* st) const;

 691   // Aggregates the per-card liveness data based on the current marking. Also sets
 692   // the amount of marked bytes for each region.
 693   void create_live_data();
 694 
 695   void finalize_live_data();
 696 
 697   void verify_live_data();
 698 };
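For context, a hedged sketch of how a caller might move one chunk's worth of entries through the helpers above. The function and the buffer handling are hypothetical, not part of this patch.

    void transfer_example(G1ConcurrentMark* cm) {
      oop buffer[G1CMMarkStack::OopsPerChunk];
      // ... fill buffer, NULL-terminating it if fewer than OopsPerChunk
      // entries are used ...
      if (!cm->mark_stack_push(buffer)) {
        return;  // global stack out of memory; the overflow flag is now set
      }
      if (cm->mark_stack_pop(buffer)) {
        // buffer now holds up to OopsPerChunk oops, NULL-terminated if fewer
      }
    }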
 699 
 700 // A class representing a marking task.
 701 class G1CMTask : public TerminatorTerminator {
 702 private:
 703   enum PrivateConstants {
 704     // The regular clock call is called once the scanned words reaches
 705     // this limit
 706     words_scanned_period          = 12*1024,
 707     // The regular clock call is called once the number of visited
 708     // references reaches this limit
 709     refs_reached_period           = 384,
 710     // Initial value for the hash seed, used in the work stealing code
 711     init_hash_seed                = 17
 712   };
 713 
 714   uint                        _worker_id;
 715   G1CollectedHeap*            _g1h;
 716   G1ConcurrentMark*           _cm;
 717   G1CMBitMap*                 _nextMarkBitMap;
 718   // the task queue of this task
 719   G1CMTaskQueue*              _task_queue;
 720 private:
 721   // the task queue set---needed for stealing
 722   G1CMTaskQueueSet*           _task_queues;
 723   // indicates whether the task has been claimed---this is only for
 724   // debugging purposes
 725   bool                        _claimed;
 726 
 727   // number of calls to this task
 728   int                         _calls;
 729 
 730   // when the virtual timer reaches this time, the marking step should
 731   // exit

 876 
 877   // Increment the number of references this task has visited.
 878   void increment_refs_reached() { ++_refs_reached; }
 879 
 880   // Grey the object by marking it.  If not already marked, push it on
 881   // the local queue if below the finger.
 882   // obj is below its region's NTAMS.
 883   inline void make_reference_grey(oop obj);
 884 
 885   // Grey the object (by calling make_reference_grey) if required,
 886   // e.g. obj is below its containing region's NTAMS.
 887   // Precondition: obj is a valid heap object.
 888   inline void deal_with_reference(oop obj);
 889 
 890   // It scans an object and visits its children.
 891   inline void scan_object(oop obj);
 892 
 893   // It pushes an object on the local queue.
 894   inline void push(oop obj);
 895 
 896   // Move entries to the global stack.
 897   void move_entries_to_global_stack();
 898   // Move entries from the global stack, return true if we were successful to do so.
 899   bool get_entries_from_global_stack();
 900 
 901   // It pops and scans objects from the local queue. If partially is
 902   // true, then it stops when the queue size reaches a given limit. If
 903   // partially is false, then it stops when the queue is empty.
 904   void drain_local_queue(bool partially);
 905   // It moves entries from the global stack to the local queue and
 906   // drains the local queue. If partially is true, then it stops when
 907   // both the global stack and the local queue reach a given size. If
 908   // partially is false, it tries to empty them totally.
 909   void drain_global_stack(bool partially);
 910   // It keeps picking SATB buffers and processing them until no SATB
 911   // buffers are available.
 912   void drain_satb_buffers();
 913 
 914   // moves the local finger to a new location
 915   inline void move_finger_to(HeapWord* new_finger) {
 916     assert(new_finger >= _finger && new_finger < _region_limit, "invariant");
 917     _finger = new_finger;
 918   }
 919 