
src/share/vm/gc/g1/g1ConcurrentMark.hpp

rev 12511 : [mq]: 8168467-use-taskentry-as-mark-stack-elem
   1 /*
   2  * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_VM_GC_G1_G1CONCURRENTMARK_HPP
  26 #define SHARE_VM_GC_G1_G1CONCURRENTMARK_HPP
  27 
  28 #include "classfile/javaClasses.hpp"
  29 #include "gc/g1/g1ConcurrentMarkObjArrayProcessor.hpp"
  30 #include "gc/g1/g1RegionToSpaceMapper.hpp"
  31 #include "gc/g1/heapRegionSet.hpp"
  32 #include "gc/shared/taskqueue.hpp"
  33 
  34 class G1CollectedHeap;
  35 class G1CMBitMap;
  36 class G1CMTask;
  37 class G1ConcurrentMark;
  38 class ConcurrentGCTimer;
  39 class G1OldTracer;
  40 class G1SurvivorRegions;
  41 typedef GenericTaskQueue<oop, mtGC>              G1CMTaskQueue;
  42 typedef GenericTaskQueueSet<G1CMTaskQueue, mtGC> G1CMTaskQueueSet;
  43 
  44 // Closure used by CM during concurrent reference discovery
  45 // and reference processing (during remarking) to determine
  46 // if a particular object is alive. It is primarily used
  47 // to determine if referents of discovered reference objects
  48 // are alive. An instance is also embedded into the
  49 // reference processor as the _is_alive_non_header field
  50 class G1CMIsAliveClosure: public BoolObjectClosure {
  51   G1CollectedHeap* _g1;
  52  public:
  53   G1CMIsAliveClosure(G1CollectedHeap* g1) : _g1(g1) { }
  54 
  55   bool do_object_b(oop obj);
  56 };
  57 
  58 // A generic CM bit map.  This is essentially a wrapper around the BitMap
  59 // class, with one bit per (1<<_shifter) HeapWords.
  60 
  61 class G1CMBitMapRO VALUE_OBJ_CLASS_SPEC {


 149 // Represents the overflow mark stack used by concurrent marking.
 150 //
 151 // Stores oops in a huge buffer in virtual memory that is always fully committed.
 152 // Resizing may only happen during a STW pause when the stack is empty.
 153 //
 154 // Memory is allocated on a "chunk" basis, i.e. a set of oops. For this, the mark
 155 // stack memory is split into evenly sized chunks of oops. Users can only
 156 // add or remove entries on that basis.
 157 // Chunks are filled in increasing address order. Chunks that are not completely
 158 // filled are terminated by a NULL element.
 159 //
 160 // Every chunk has a header containing a single pointer element used for memory
 161 // management. This wastes some space, but is negligible (< .1% with current sizing).
 162 //
 163 // Memory management is done using a mix of tracking a high water-mark indicating
 164 // that all chunks at a lower address are valid chunks, and a singly linked free
 165 // list connecting all empty chunks.
 166 class G1CMMarkStack VALUE_OBJ_CLASS_SPEC {
 167 public:
 168   // Number of oops that can fit in a single chunk.
 169   static const size_t OopsPerChunk = 1024 - 1 /* One reference for the next pointer */;
 170 private:
 171   struct OopChunk {
 172     OopChunk* next;
 173     oop data[OopsPerChunk];
 174   };
 175 
 176   size_t _max_chunk_capacity;    // Maximum number of OopChunk elements on the stack.
 177 
 178   OopChunk* _base;               // Bottom address of allocated memory area.
 179   size_t _chunk_capacity;        // Current maximum number of OopChunk elements.
 180 
 181   char _pad0[DEFAULT_CACHE_LINE_SIZE];
 182   OopChunk* volatile _free_list;  // Linked list of free chunks that can be allocated by users.
 183   char _pad1[DEFAULT_CACHE_LINE_SIZE - sizeof(OopChunk*)];
 184   OopChunk* volatile _chunk_list; // List of chunks currently containing data.
 185   volatile size_t _chunks_in_chunk_list;
 186   char _pad2[DEFAULT_CACHE_LINE_SIZE - sizeof(OopChunk*) - sizeof(size_t)];
 187 
 188   volatile size_t _hwm;          // High water mark within the reserved space.
 189   char _pad4[DEFAULT_CACHE_LINE_SIZE - sizeof(size_t)];
 190 
 191   // Allocate a new chunk from the reserved memory, using the high water mark. Returns
 192   // NULL if out of memory.
 193   OopChunk* allocate_new_chunk();


 210 
 211   // Resizes the mark stack to the given new capacity. Releases any previous
 212   // memory if successful.
 213   bool resize(size_t new_capacity);
 214 
 215  public:
 216   G1CMMarkStack();
 217   ~G1CMMarkStack();
 218 
 219   // Alignment and minimum capacity of this mark stack in number of oops.
 220   static size_t capacity_alignment();
 221 
 222   // Allocate and initialize the mark stack with the given number of oops.
 223   bool initialize(size_t initial_capacity, size_t max_capacity);
 224 
 225   // Pushes the given buffer containing at most OopsPerChunk elements on the mark
  226   // stack. If fewer than OopsPerChunk elements are to be pushed, the array must
 227   // be terminated with a NULL.
 228   // Returns whether the buffer contents were successfully pushed to the global mark
 229   // stack.
 230   bool par_push_chunk(oop* buffer);
 231 
  232   // Pops a chunk from this mark stack, copying its elements into the given
  233   // buffer. This chunk may contain up to OopsPerChunk elements. If there are
  234   // fewer, the last element in the array is a NULL pointer.
 235   bool par_pop_chunk(oop* buffer);
 236 
 237   // Return whether the chunk list is empty. Racy due to unsynchronized access to
 238   // _chunk_list.
 239   bool is_empty() const { return _chunk_list == NULL; }
 240 
 241   size_t capacity() const  { return _chunk_capacity; }
 242 
 243   bool is_out_of_memory() const { return _out_of_memory; }
 244   void clear_out_of_memory() { _out_of_memory = false; }
 245 
 246   bool should_expand() const { return _should_expand; }
 247   void set_should_expand(bool value) { _should_expand = value; }
 248 
 249   // Expand the stack, typically in response to an overflow condition
 250   void expand();
 251 
 252   // Return the approximate number of oops on this mark stack. Racy due to
 253   // unsynchronized access to _chunks_in_chunk_list.
 254   size_t size() const { return _chunks_in_chunk_list * OopsPerChunk; }
 255 
 256   void set_empty();
 257 
 258   // Apply Fn to every oop on the mark stack. The mark stack must not
 259   // be modified while iterating.
 260   template<typename Fn> void iterate(Fn fn) const PRODUCT_RETURN;
 261 };
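
The NULL-termination contract between par_push_chunk(), par_pop_chunk() and their callers is easy to get wrong, so a minimal standalone sketch of it follows. This is illustrative only: Oop stands in for HotSpot's oop, and prepare_chunk_buffer/count_chunk_entries are hypothetical helpers, not functions in this file.

    #include <cstddef>

    typedef void* Oop;  // stand-in for HotSpot's oop

    static const size_t OopsPerChunk = 1024 - 1;  // one slot is reserved for the chunk's next pointer

    // Fill 'buffer' with n <= OopsPerChunk entries in the format par_push_chunk()
    // expects: a partially filled buffer is terminated by a NULL entry.
    void prepare_chunk_buffer(Oop* buffer, const Oop* src, size_t n) {
      for (size_t i = 0; i < n; i++) {
        buffer[i] = src[i];
      }
      if (n < OopsPerChunk) {
        buffer[n] = NULL;  // terminator; consumers stop at the first NULL
      }
    }

    // Consume a buffer filled by par_pop_chunk(): scan up to OopsPerChunk
    // entries, stopping early at the NULL terminator.
    size_t count_chunk_entries(const Oop* buffer) {
      size_t n = 0;
      while (n < OopsPerChunk && buffer[n] != NULL) {
        n++;
      }
      return n;
    }
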
 262 
 263 // Root Regions are regions that are not empty at the beginning of a
 264 // marking cycle and which we might collect during an evacuation pause
 265 // while the cycle is active. Given that, during evacuation pauses, we
 266 // do not copy objects that are explicitly marked, what we have to do
 267 // for the root regions is to scan them and mark all objects reachable
 268 // from them. According to the SATB assumptions, we only need to visit
 269 // each object once during marking. So, as long as we finish this scan
 270 // before the next evacuation pause, we can copy the objects from the
 271 // root regions without having to mark them or do anything else to them.
 272 //
 273 // Currently, we only support root region scanning once (at the start
 274 // of the marking cycle) and the root regions are all the survivor


 514   void enter_first_sync_barrier(uint worker_id);
 515   void enter_second_sync_barrier(uint worker_id);
 516 
 517   // Card index of the bottom of the G1 heap. Used for biasing indices into
 518   // the card bitmaps.
 519   intptr_t _heap_bottom_card_num;
 520 
 521   // Set to true when initialization is complete
 522   bool _completed_initialization;
 523 
  524   // end_timer: true to end the GC timer after ending the concurrent phase.
 525   void register_concurrent_phase_end_common(bool end_timer);
 526 
 527   // Clear the given bitmap in parallel using the given WorkGang. If may_yield is
 528   // true, periodically insert checks to see if this method should exit prematurely.
 529   void clear_bitmap(G1CMBitMap* bitmap, WorkGang* workers, bool may_yield);
 530 public:
 531   // Manipulation of the global mark stack.
 532   // The push and pop operations are used by tasks for transfers
 533   // between task-local queues and the global mark stack.
 534   bool mark_stack_push(oop* arr) {
 535     if (!_global_mark_stack.par_push_chunk(arr)) {
 536       set_has_overflown();
 537       return false;
 538     }
 539     return true;
 540   }
 541   bool mark_stack_pop(oop* arr) {
 542     return _global_mark_stack.par_pop_chunk(arr);
 543   }
 544   size_t mark_stack_size()                { return _global_mark_stack.size(); }
 545   size_t partial_mark_stack_size_target() { return _global_mark_stack.capacity()/3; }
 546   bool mark_stack_overflow()              { return _global_mark_stack.is_out_of_memory(); }
 547   bool mark_stack_empty()                 { return _global_mark_stack.is_empty(); }
 548 
 549   G1CMRootRegions* root_regions() { return &_root_regions; }
 550 
 551   bool concurrent_marking_in_progress() {
 552     return _concurrent_marking_in_progress;
 553   }
 554   void set_concurrent_marking_in_progress() {
 555     _concurrent_marking_in_progress = true;
 556   }
 557   void clear_concurrent_marking_in_progress() {
 558     _concurrent_marking_in_progress = false;
 559   }
 560 
 561   void concurrent_cycle_start();
 562   void concurrent_cycle_end();
 563 
 564   void update_accum_task_vtime(int i, double vtime) {
 565     _accum_task_vtime[i] += vtime;
 566   }
 567 
 568   double all_task_accum_vtime() {
 569     double ret = 0.0;
 570     for (uint i = 0; i < _max_worker_id; ++i)
 571       ret += _accum_task_vtime[i];
 572     return ret;
 573   }
 574 
 575   // Attempts to steal an object from the task queues of other tasks
 576   bool try_stealing(uint worker_id, int* hash_seed, oop& obj);
 577 
 578   G1ConcurrentMark(G1CollectedHeap* g1h,
 579                    G1RegionToSpaceMapper* prev_bitmap_storage,
 580                    G1RegionToSpaceMapper* next_bitmap_storage);
 581   ~G1ConcurrentMark();
 582 
 583   ConcurrentMarkThread* cmThread() { return _cmThread; }
 584 
 585   G1CMBitMapRO* prevMarkBitMap() const { return _prevMarkBitMap; }
 586   G1CMBitMap*   nextMarkBitMap() const { return _nextMarkBitMap; }
 587 
 588   // Returns the number of GC threads to be used in a concurrent
 589   // phase based on the number of GC threads being used in a STW
 590   // phase.
 591   uint scale_parallel_threads(uint n_par_threads);
 592 
 593   // Calculates the number of GC threads to be used in a concurrent phase.
 594   uint calc_parallel_marking_threads();
 595 
 596   // The following three are interaction between CM and


 811   // an expensive operation
 812   void decrease_limits();
 813   // it checks whether the words scanned or refs visited reached their
 814   // respective limit and calls reached_limit() if they have
 815   void check_limits() {
 816     if (_words_scanned >= _words_scanned_limit ||
 817         _refs_reached >= _refs_reached_limit) {
 818       reached_limit();
 819     }
 820   }
 821   // this is supposed to be called regularly during a marking step as
 822   // it checks a bunch of conditions that might cause the marking step
 823   // to abort
 824   void regular_clock_call();
 825   bool concurrent() { return _concurrent; }
 826 
 827   // Test whether obj might have already been passed over by the
 828   // mark bitmap scan, and so needs to be pushed onto the mark stack.
 829   bool is_below_finger(oop obj, HeapWord* global_finger) const;
 830 
 831   template<bool scan> void process_grey_object(oop obj);
 832 public:
 833   // Apply the closure on the given area of the objArray. Return the number of words
 834   // scanned.
 835   inline size_t scan_objArray(objArrayOop obj, MemRegion mr);
 836   // It resets the task; it should be called right at the beginning of
 837   // a marking phase.
 838   void reset(G1CMBitMap* _nextMarkBitMap);
 839   // it clears all the fields that correspond to a claimed region.
 840   void clear_region_fields();
 841 
 842   void set_concurrent(bool concurrent) { _concurrent = concurrent; }
 843 
 844   // The main method of this class which performs a marking step
 845   // trying not to exceed the given duration. However, it might exit
  846   // prematurely if certain conditions are met (e.g. SATB buffers become
  847   // available for processing).
 848   void do_marking_step(double target_ms,
 849                        bool do_termination,
 850                        bool is_serial);
 851 


 876   void clear_has_aborted()      { _has_aborted = false; }
 877   bool has_timed_out()          { return _has_timed_out; }
 878   bool claimed()                { return _claimed; }
 879 
 880   void set_cm_oop_closure(G1CMOopClosure* cm_oop_closure);
 881 
 882   // Increment the number of references this task has visited.
 883   void increment_refs_reached() { ++_refs_reached; }
 884 
 885   // Grey the object by marking it.  If not already marked, push it on
 886   // the local queue if below the finger.
 887   // obj is below its region's NTAMS.
 888   inline void make_reference_grey(oop obj);
 889 
  890   // Grey the object (by calling make_reference_grey) if required,
 891   // e.g. obj is below its containing region's NTAMS.
 892   // Precondition: obj is a valid heap object.
 893   inline void deal_with_reference(oop obj);
 894 
 895   // It scans an object and visits its children.
 896   inline void scan_object(oop obj);
 897 
 898   // It pushes an object on the local queue.
 899   inline void push(oop obj);
 900 
 901   // Move entries to the global stack.
 902   void move_entries_to_global_stack();
  903   // Move entries from the global stack, returning true if successful.
 904   bool get_entries_from_global_stack();
 905 
 906   // It pops and scans objects from the local queue. If partially is
  907   // true, then it stops when the queue size drops below a given limit. If
 908   // partially is false, then it stops when the queue is empty.
 909   void drain_local_queue(bool partially);
 910   // It moves entries from the global stack to the local queue and
 911   // drains the local queue. If partially is true, then it stops when
 912   // both the global stack and the local queue reach a given size. If
  913   // partially is false, it tries to empty them completely.
 914   void drain_global_stack(bool partially);
 915   // It keeps picking SATB buffers and processing them until no SATB
 916   // buffers are available.
 917   void drain_satb_buffers();
 918 
 919   // moves the local finger to a new location


   1 /*
   2  * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_VM_GC_G1_G1CONCURRENTMARK_HPP
  26 #define SHARE_VM_GC_G1_G1CONCURRENTMARK_HPP
  27 
  28 #include "classfile/javaClasses.hpp"
  29 #include "gc/g1/g1ConcurrentMarkObjArrayProcessor.hpp"
  30 #include "gc/g1/g1RegionToSpaceMapper.hpp"
  31 #include "gc/g1/heapRegionSet.hpp"
  32 #include "gc/shared/taskqueue.hpp"
  33 
  34 class G1CollectedHeap;
  35 class G1CMBitMap;
  36 class G1CMTask;
  37 class G1ConcurrentMark;
  38 class ConcurrentGCTimer;
  39 class G1OldTracer;
  40 class G1SurvivorRegions;
  41 
  42 #ifdef _MSC_VER
  43 #pragma warning(push)
  44 // warning C4522: multiple assignment operators specified
  45 #pragma warning(disable:4522)
  46 #endif
  47 
   48 // A container class for mark stack entries: either an oop or a continuation
   49 // address of a partially scanned object array. Both are pushed onto the mark stack.
  50 class G1TaskQueueEntry VALUE_OBJ_CLASS_SPEC {
  51 private:
  52   void* _holder;
  53 
  54   static const uintptr_t ArraySliceBit = 1;
  55 public:
  56   G1TaskQueueEntry() : _holder(NULL) { }
  57   G1TaskQueueEntry(oop obj) : _holder(obj) { }
  58   G1TaskQueueEntry(HeapWord* addr) : _holder((void*)((uintptr_t)addr | ArraySliceBit)) { }
  59 
  60   G1TaskQueueEntry& operator=(const G1TaskQueueEntry& t) {
  61     _holder = t._holder;
  62     return *this;
  63   }
  64 
  65   volatile G1TaskQueueEntry& operator=(const volatile G1TaskQueueEntry& t) volatile {
  66     _holder = t._holder;
  67     return *this;
  68   }
  69 
  70   oop obj() const {
  71     assert(!is_array_slice(), "Trying to read array slice " PTR_FORMAT " as oop", p2i(_holder));
  72     return (oop)_holder;
  73   }
  74 
  75   HeapWord* slice() const {
  76     assert(is_array_slice(), "Trying to read oop " PTR_FORMAT " as array slice", p2i(_holder));
  77     return (HeapWord*)((uintptr_t)_holder &~ ArraySliceBit);
  78   }
  79 
  80   bool is_oop() const { return !is_array_slice(); }
  81   bool is_array_slice() const { return ((uintptr_t)_holder & ArraySliceBit) != 0; }
  82   bool is_null() const { return _holder == NULL; }
  83 };
  84 
  85 #ifdef _MSC_VER
  86 #pragma warning(pop)
  87 #endif
  88 
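
G1TaskQueueEntry above encodes two kinds of entries in one pointer-sized field: because heap objects are word aligned, bit 0 of a valid object pointer is always zero, so setting it marks the entry as an array-slice continuation address instead. A standalone analogue of the tagging follows (plain C++; TaggedEntry is an illustrative type, not part of HotSpot):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    // Standalone analogue of G1TaskQueueEntry's low-bit tagging. It relies on
    // word alignment guaranteeing that bit 0 of a real object pointer is clear.
    class TaggedEntry {
      void* _holder;
      static const uintptr_t SliceBit = 1;
    public:
      TaggedEntry() : _holder(NULL) { }
      explicit TaggedEntry(void* obj) : _holder(obj) {
        assert(((uintptr_t)obj & SliceBit) == 0 && "object pointer must be aligned");
      }
      static TaggedEntry from_slice(void* addr) {
        TaggedEntry e;
        e._holder = (void*)((uintptr_t)addr | SliceBit);  // tag with bit 0
        return e;
      }
      bool is_array_slice() const { return ((uintptr_t)_holder & SliceBit) != 0; }
      bool is_null() const        { return _holder == NULL; }
      void* obj() const {
        assert(!is_array_slice() && "entry holds a slice, not an object");
        return _holder;
      }
      void* slice() const {
        assert(is_array_slice() && "entry holds an object, not a slice");
        return (void*)((uintptr_t)_holder & ~SliceBit);  // strip the tag
      }
    };

Because each entry is still a single word, GenericTaskQueue<G1TaskQueueEntry, mtGC> has the same footprint as the previous GenericTaskQueue<oop, mtGC>.
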
  89 typedef GenericTaskQueue<G1TaskQueueEntry, mtGC> G1CMTaskQueue;
  90 typedef GenericTaskQueueSet<G1CMTaskQueue, mtGC> G1CMTaskQueueSet;
  91 
  92 // Closure used by CM during concurrent reference discovery
  93 // and reference processing (during remarking) to determine
  94 // if a particular object is alive. It is primarily used
  95 // to determine if referents of discovered reference objects
  96 // are alive. An instance is also embedded into the
  97 // reference processor as the _is_alive_non_header field
  98 class G1CMIsAliveClosure: public BoolObjectClosure {
  99   G1CollectedHeap* _g1;
 100  public:
 101   G1CMIsAliveClosure(G1CollectedHeap* g1) : _g1(g1) { }
 102 
 103   bool do_object_b(oop obj);
 104 };
 105 
 106 // A generic CM bit map.  This is essentially a wrapper around the BitMap
 107 // class, with one bit per (1<<_shifter) HeapWords.
 108 
 109 class G1CMBitMapRO VALUE_OBJ_CLASS_SPEC {


 197 // Represents the overflow mark stack used by concurrent marking.
 198 //
 199 // Stores task queue entries in a huge buffer in virtual memory that is always fully committed.
 200 // Resizing may only happen during a STW pause when the stack is empty.
 201 //
 202 // Memory is allocated on a "chunk" basis, i.e. a set of entries. For this, the mark
 203 // stack memory is split into evenly sized chunks of entries. Users can only
 204 // add or remove entries on that basis.
 205 // Chunks are filled in increasing address order. Chunks that are not completely
 206 // filled are terminated by a NULL element.
 207 //
 208 // Every chunk has a header containing a single pointer element used for memory
 209 // management. This wastes some space, but is negligible (< .1% with current sizing).
 210 //
 211 // Memory management is done using a mix of tracking a high water-mark indicating
 212 // that all chunks at a lower address are valid chunks, and a singly linked free
 213 // list connecting all empty chunks.
 214 class G1CMMarkStack VALUE_OBJ_CLASS_SPEC {
 215 public:
  216   // Number of entries that can fit in a single chunk.
 217   static const size_t EntriesPerChunk = 1024 - 1 /* One reference for the next pointer */;
 218 private:
 219   struct OopChunk {
 220     OopChunk* next;
 221     G1TaskQueueEntry data[EntriesPerChunk];
 222   };
 223 
 224   size_t _max_chunk_capacity;    // Maximum number of OopChunk elements on the stack.
 225 
 226   OopChunk* _base;               // Bottom address of allocated memory area.
 227   size_t _chunk_capacity;        // Current maximum number of OopChunk elements.
 228 
 229   char _pad0[DEFAULT_CACHE_LINE_SIZE];
 230   OopChunk* volatile _free_list;  // Linked list of free chunks that can be allocated by users.
 231   char _pad1[DEFAULT_CACHE_LINE_SIZE - sizeof(OopChunk*)];
 232   OopChunk* volatile _chunk_list; // List of chunks currently containing data.
 233   volatile size_t _chunks_in_chunk_list;
 234   char _pad2[DEFAULT_CACHE_LINE_SIZE - sizeof(OopChunk*) - sizeof(size_t)];
 235 
 236   volatile size_t _hwm;          // High water mark within the reserved space.
 237   char _pad4[DEFAULT_CACHE_LINE_SIZE - sizeof(size_t)];
 238 
 239   // Allocate a new chunk from the reserved memory, using the high water mark. Returns
 240   // NULL if out of memory.
 241   OopChunk* allocate_new_chunk();


 258 
 259   // Resizes the mark stack to the given new capacity. Releases any previous
 260   // memory if successful.
 261   bool resize(size_t new_capacity);
 262 
 263  public:
 264   G1CMMarkStack();
 265   ~G1CMMarkStack();
 266 
  267   // Alignment and minimum capacity of this mark stack in number of entries.
 268   static size_t capacity_alignment();
 269 
  270   // Allocate and initialize the mark stack with the given number of entries.
 271   bool initialize(size_t initial_capacity, size_t max_capacity);
 272 
  273   // Pushes the given buffer containing at most EntriesPerChunk elements on the mark
  274   // stack. If fewer than EntriesPerChunk elements are to be pushed, the array must
 275   // be terminated with a NULL.
 276   // Returns whether the buffer contents were successfully pushed to the global mark
 277   // stack.
 278   bool par_push_chunk(G1TaskQueueEntry* buffer);
 279 
  280   // Pops a chunk from this mark stack, copying its elements into the given
  281   // buffer. This chunk may contain up to EntriesPerChunk elements. If there are
  282   // fewer, the last element in the array is a NULL pointer.
 283   bool par_pop_chunk(G1TaskQueueEntry* buffer);
 284 
 285   // Return whether the chunk list is empty. Racy due to unsynchronized access to
 286   // _chunk_list.
 287   bool is_empty() const { return _chunk_list == NULL; }
 288 
 289   size_t capacity() const  { return _chunk_capacity; }
 290 
 291   bool is_out_of_memory() const { return _out_of_memory; }
 292   void clear_out_of_memory() { _out_of_memory = false; }
 293 
 294   bool should_expand() const { return _should_expand; }
 295   void set_should_expand(bool value) { _should_expand = value; }
 296 
 297   // Expand the stack, typically in response to an overflow condition
 298   void expand();
 299 
  300   // Return the approximate number of entries on this mark stack. Racy due to
 301   // unsynchronized access to _chunks_in_chunk_list.
 302   size_t size() const { return _chunks_in_chunk_list * EntriesPerChunk; }
 303 
 304   void set_empty();
 305 
  306   // Apply Fn to every entry on the mark stack. The mark stack must not
 307   // be modified while iterating.
 308   template<typename Fn> void iterate(Fn fn) const PRODUCT_RETURN;
 309 };
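
The high-water-mark plus free-list scheme described in the comment block above can also be sketched in isolation. The sketch below is a simplified analogue, not the HotSpot implementation: it uses std::atomic instead of HotSpot's Atomic:: primitives, ignores the ABA hazard a production lock-free pop must consider, and ChunkAllocator is an illustrative name.

    #include <atomic>
    #include <cstddef>

    struct Chunk { Chunk* next; };  // header only; the entry payload is omitted

    // Simplified analogue of the mark stack's chunk allocation: try the lock-free
    // free list of recycled chunks first, then bump the high water mark into the
    // reserved (always fully committed) chunk array.
    class ChunkAllocator {
      Chunk* _base;                    // bottom of the reserved chunk array
      size_t _capacity;                // total number of reserved chunks
      std::atomic<size_t> _hwm;        // chunks at index < _hwm have been handed out
      std::atomic<Chunk*> _free_list;  // singly linked list of recycled chunks

    public:
      ChunkAllocator(Chunk* base, size_t capacity)
        : _base(base), _capacity(capacity), _hwm(0), _free_list(NULL) { }

      Chunk* allocate() {
        // Fast path: pop a recycled chunk off the free list.
        Chunk* head = _free_list.load();
        while (head != NULL &&
               !_free_list.compare_exchange_weak(head, head->next)) {
          // CAS failure reloaded 'head'; retry.
        }
        if (head != NULL) {
          return head;
        }
        // Slow path: claim a fresh chunk below the high water mark. Because _hwm
        // only grows, every chunk below it is known to have been handed out.
        size_t idx = _hwm.fetch_add(1);
        if (idx >= _capacity) {
          return NULL;  // exhausted; the caller flags mark stack overflow
        }
        return &_base[idx];
      }

      void release(Chunk* c) {
        // Standard lock-free push onto the free list.
        c->next = _free_list.load();
        while (!_free_list.compare_exchange_weak(c->next, c)) {
          // CAS failure updated c->next to the current head; retry.
        }
      }
    };
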
 310 
 311 // Root Regions are regions that are not empty at the beginning of a
 312 // marking cycle and which we might collect during an evacuation pause
 313 // while the cycle is active. Given that, during evacuation pauses, we
 314 // do not copy objects that are explicitly marked, what we have to do
 315 // for the root regions is to scan them and mark all objects reachable
 316 // from them. According to the SATB assumptions, we only need to visit
 317 // each object once during marking. So, as long as we finish this scan
 318 // before the next evacuation pause, we can copy the objects from the
 319 // root regions without having to mark them or do anything else to them.
 320 //
 321 // Currently, we only support root region scanning once (at the start
 322 // of the marking cycle) and the root regions are all the survivor


 562   void enter_first_sync_barrier(uint worker_id);
 563   void enter_second_sync_barrier(uint worker_id);
 564 
 565   // Card index of the bottom of the G1 heap. Used for biasing indices into
 566   // the card bitmaps.
 567   intptr_t _heap_bottom_card_num;
 568 
 569   // Set to true when initialization is complete
 570   bool _completed_initialization;
 571 
  572   // end_timer: true to end the GC timer after ending the concurrent phase.
 573   void register_concurrent_phase_end_common(bool end_timer);
 574 
 575   // Clear the given bitmap in parallel using the given WorkGang. If may_yield is
 576   // true, periodically insert checks to see if this method should exit prematurely.
 577   void clear_bitmap(G1CMBitMap* bitmap, WorkGang* workers, bool may_yield);
 578 public:
 579   // Manipulation of the global mark stack.
 580   // The push and pop operations are used by tasks for transfers
 581   // between task-local queues and the global mark stack.
 582   bool mark_stack_push(G1TaskQueueEntry* arr) {
 583     if (!_global_mark_stack.par_push_chunk(arr)) {
 584       set_has_overflown();
 585       return false;
 586     }
 587     return true;
 588   }
 589   bool mark_stack_pop(G1TaskQueueEntry* arr) {
 590     return _global_mark_stack.par_pop_chunk(arr);
 591   }
 592   size_t mark_stack_size()                { return _global_mark_stack.size(); }
 593   size_t partial_mark_stack_size_target() { return _global_mark_stack.capacity()/3; }
 594   bool mark_stack_overflow()              { return _global_mark_stack.is_out_of_memory(); }
 595   bool mark_stack_empty()                 { return _global_mark_stack.is_empty(); }
 596 
 597   G1CMRootRegions* root_regions() { return &_root_regions; }
 598 
 599   bool concurrent_marking_in_progress() {
 600     return _concurrent_marking_in_progress;
 601   }
 602   void set_concurrent_marking_in_progress() {
 603     _concurrent_marking_in_progress = true;
 604   }
 605   void clear_concurrent_marking_in_progress() {
 606     _concurrent_marking_in_progress = false;
 607   }
 608 
 609   void concurrent_cycle_start();
 610   void concurrent_cycle_end();
 611 
 612   void update_accum_task_vtime(int i, double vtime) {
 613     _accum_task_vtime[i] += vtime;
 614   }
 615 
 616   double all_task_accum_vtime() {
 617     double ret = 0.0;
 618     for (uint i = 0; i < _max_worker_id; ++i)
 619       ret += _accum_task_vtime[i];
 620     return ret;
 621   }
 622 
  623   // Attempts to steal an entry from the task queues of other tasks.
 624   bool try_stealing(uint worker_id, int* hash_seed, G1TaskQueueEntry& task_entry);
 625 
 626   G1ConcurrentMark(G1CollectedHeap* g1h,
 627                    G1RegionToSpaceMapper* prev_bitmap_storage,
 628                    G1RegionToSpaceMapper* next_bitmap_storage);
 629   ~G1ConcurrentMark();
 630 
 631   ConcurrentMarkThread* cmThread() { return _cmThread; }
 632 
 633   G1CMBitMapRO* prevMarkBitMap() const { return _prevMarkBitMap; }
 634   G1CMBitMap*   nextMarkBitMap() const { return _nextMarkBitMap; }
 635 
 636   // Returns the number of GC threads to be used in a concurrent
 637   // phase based on the number of GC threads being used in a STW
 638   // phase.
 639   uint scale_parallel_threads(uint n_par_threads);
 640 
 641   // Calculates the number of GC threads to be used in a concurrent phase.
 642   uint calc_parallel_marking_threads();
 643 
 644   // The following three are interaction between CM and


 859   // an expensive operation
 860   void decrease_limits();
 861   // it checks whether the words scanned or refs visited reached their
 862   // respective limit and calls reached_limit() if they have
 863   void check_limits() {
 864     if (_words_scanned >= _words_scanned_limit ||
 865         _refs_reached >= _refs_reached_limit) {
 866       reached_limit();
 867     }
 868   }
 869   // this is supposed to be called regularly during a marking step as
 870   // it checks a bunch of conditions that might cause the marking step
 871   // to abort
 872   void regular_clock_call();
 873   bool concurrent() { return _concurrent; }
 874 
 875   // Test whether obj might have already been passed over by the
 876   // mark bitmap scan, and so needs to be pushed onto the mark stack.
 877   bool is_below_finger(oop obj, HeapWord* global_finger) const;
 878 
 879   template<bool scan> void process_grey_object(G1TaskQueueEntry task_entry);
 880 public:
 881   // Apply the closure on the given area of the objArray. Return the number of words
 882   // scanned.
 883   inline size_t scan_objArray(objArrayOop obj, MemRegion mr);
 884   // It resets the task; it should be called right at the beginning of
 885   // a marking phase.
 886   void reset(G1CMBitMap* _nextMarkBitMap);
 887   // it clears all the fields that correspond to a claimed region.
 888   void clear_region_fields();
 889 
 890   void set_concurrent(bool concurrent) { _concurrent = concurrent; }
 891 
 892   // The main method of this class which performs a marking step
 893   // trying not to exceed the given duration. However, it might exit
 894   // prematurely, according to some conditions (i.e. SATB buffers are
 895   // available for processing).
 896   void do_marking_step(double target_ms,
 897                        bool do_termination,
 898                        bool is_serial);
 899 


 924   void clear_has_aborted()      { _has_aborted = false; }
 925   bool has_timed_out()          { return _has_timed_out; }
 926   bool claimed()                { return _claimed; }
 927 
 928   void set_cm_oop_closure(G1CMOopClosure* cm_oop_closure);
 929 
 930   // Increment the number of references this task has visited.
 931   void increment_refs_reached() { ++_refs_reached; }
 932 
 933   // Grey the object by marking it.  If not already marked, push it on
 934   // the local queue if below the finger.
 935   // obj is below its region's NTAMS.
 936   inline void make_reference_grey(oop obj);
 937 
  938   // Grey the object (by calling make_reference_grey) if required,
 939   // e.g. obj is below its containing region's NTAMS.
 940   // Precondition: obj is a valid heap object.
 941   inline void deal_with_reference(oop obj);
 942 
 943   // It scans an object and visits its children.
 944   inline void scan_object(G1TaskQueueEntry task_entry);
 945 
  946   // Pushes the given entry on the local queue.
 947   inline void push(G1TaskQueueEntry task_entry);
 948 
 949   // Move entries to the global stack.
 950   void move_entries_to_global_stack();
  951   // Move entries from the global stack, returning true if successful.
 952   bool get_entries_from_global_stack();
 953 
 954   // It pops and scans objects from the local queue. If partially is
  955   // true, then it stops when the queue size drops below a given limit. If
 956   // partially is false, then it stops when the queue is empty.
 957   void drain_local_queue(bool partially);
 958   // It moves entries from the global stack to the local queue and
 959   // drains the local queue. If partially is true, then it stops when
 960   // both the global stack and the local queue reach a given size. If
  961   // partially is false, it tries to empty them completely.
 962   void drain_global_stack(bool partially);
 963   // It keeps picking SATB buffers and processing them until no SATB
 964   // buffers are available.
 965   void drain_satb_buffers();
 966 
 967   // moves the local finger to a new location

