
src/share/vm/gc/g1/g1ConcurrentMark.hpp

rev 12666 : imported patch 8168467-use-taskentry-as-mark-stack-elem
rev 12667 : imported patch 8168467-kim-review


  35 class G1CMBitMap;
  36 class G1CMTask;
  37 class G1ConcurrentMark;
  38 class ConcurrentGCTimer;
  39 class G1OldTracer;
  40 class G1SurvivorRegions;
  41 
  42 #ifdef _MSC_VER
  43 #pragma warning(push)
  44 // warning C4522: multiple assignment operators specified
  45 #pragma warning(disable:4522)
  46 #endif
  47 
  48 // Container class for a mark stack entry: either an oop or a continuation
  49 // address within an objArray ("array slice"). Both kinds are pushed onto the mark stack.
  50 class G1TaskQueueEntry VALUE_OBJ_CLASS_SPEC {
  51 private:
  52   void* _holder;
  53 
  54   static const uintptr_t ArraySliceBit = 1;





  55 public:

  56   G1TaskQueueEntry() : _holder(NULL) { }
  57   G1TaskQueueEntry(oop obj) : _holder(obj) { }
  58   G1TaskQueueEntry(HeapWord* addr) : _holder((void*)((uintptr_t)addr | ArraySliceBit)) { }

  59 
  60   G1TaskQueueEntry& operator=(const G1TaskQueueEntry& t) {
  61     _holder = t._holder;
  62     return *this;
  63   }
  64 
  65   volatile G1TaskQueueEntry& operator=(const volatile G1TaskQueueEntry& t) volatile {
  66     _holder = t._holder;
  67     return *this;
  68   }
  69 
  70   oop obj() const {
  71     assert(!is_array_slice(), "Trying to read array slice " PTR_FORMAT " as oop", p2i(_holder));
  72     return (oop)_holder;
  73   }
  74 
  75   HeapWord* slice() const {
  76     assert(is_array_slice(), "Trying to read oop " PTR_FORMAT " as array slice", p2i(_holder));
  77     return (HeapWord*)((uintptr_t)_holder &~ ArraySliceBit);
  78   }
  79 
  80   bool is_oop() const { return !is_array_slice(); }
  81   bool is_array_slice() const { return ((uintptr_t)_holder & ArraySliceBit) != 0; }
  82   bool is_null() const { return _holder == NULL; }
  83 };
  84 
  85 #ifdef _MSC_VER
  86 #pragma warning(pop)
  87 #endif
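
The tagging above is the classic low-bit trick: oops and HeapWord addresses are at
least word-aligned, so bit 0 of _holder is always zero for a plain pointer and is
free to serve as the ArraySliceBit discriminator. A minimal self-contained sketch of
the same technique, using standalone names rather than anything from this file:

    #include <cassert>
    #include <cstdint>

    struct alignas(8) Payload { long dummy; };   // alignment keeps bit 0 of its address zero

    class TaggedEntry {
      void* _holder;
      static const uintptr_t TagBit = 1;
      explicit TaggedEntry(void* raw) : _holder(raw) { }
    public:
      static TaggedEntry plain(Payload* p)  { return TaggedEntry(p); }
      static TaggedEntry tagged(Payload* p) { return TaggedEntry((void*)((uintptr_t)p | TagBit)); }
      bool is_tagged() const { return ((uintptr_t)_holder & TagBit) != 0; }
      Payload* untag() const { return (Payload*)((uintptr_t)_holder & ~TagBit); }
    };

    int main() {
      Payload p;
      TaggedEntry a = TaggedEntry::plain(&p);
      TaggedEntry b = TaggedEntry::tagged(&p);
      assert(!a.is_tagged() && b.is_tagged() && b.untag() == &p);
      return 0;
    }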
  88 
  89 typedef GenericTaskQueue<G1TaskQueueEntry, mtGC> G1CMTaskQueue;
  90 typedef GenericTaskQueueSet<G1CMTaskQueue, mtGC> G1CMTaskQueueSet;
  91 
  92 // Closure used by CM during concurrent reference discovery
  93 // and reference processing (during remarking) to determine
  94 // if a particular object is alive. It is primarily used
  95 // to determine if referents of discovered reference objects
  96 // are alive. An instance is also embedded into the
  97 // reference processor as the _is_alive_non_header field


 199 // Stores oops in a huge buffer in virtual memory that is always fully committed.
 200 // Resizing may only happen during a STW pause when the stack is empty.
 201 //
 202 // Memory is allocated on a "chunk" basis, i.e. a set of oops. For this, the mark
 203 // stack memory is split into evenly sized chunks of oops. Users can only
 204 // add or remove entries on that basis.
 205 // Chunks are filled in increasing address order. Not completely filled chunks
 206 // have a NULL element as a terminating element.
 207 //
 208 // Every chunk has a header containing a single pointer element used for memory
 209 // management. This wastes some space, but is negligible (< .1% with current sizing).
 210 //
 211 // Memory management is done using a mix of tracking a high water-mark indicating
 212 // that all chunks at a lower address are valid chunks, and a singly linked free
 213 // list connecting all empty chunks.
 214 class G1CMMarkStack VALUE_OBJ_CLASS_SPEC {
 215 public:
 216   // Number of oops that can fit in a single chunk.
 217   static const size_t EntriesPerChunk = 1024 - 1 /* One reference for the next pointer */;
 218 private:
 219   struct OopChunk {
 220     OopChunk* next;
 221     G1TaskQueueEntry data[EntriesPerChunk];
 222   };
 223 
 224   size_t _max_chunk_capacity;    // Maximum number of OopChunk elements on the stack.
 225 
 226   OopChunk* _base;               // Bottom address of allocated memory area.
 227   size_t _chunk_capacity;        // Current maximum number of OopChunk elements.
 228 
 229   char _pad0[DEFAULT_CACHE_LINE_SIZE];
 230   OopChunk* volatile _free_list;  // Linked list of free chunks that can be allocated by users.
 231   char _pad1[DEFAULT_CACHE_LINE_SIZE - sizeof(OopChunk*)];
 232   OopChunk* volatile _chunk_list; // List of chunks currently containing data.
 233   volatile size_t _chunks_in_chunk_list;
 234   char _pad2[DEFAULT_CACHE_LINE_SIZE - sizeof(OopChunk*) - sizeof(size_t)];
 235 
 236   volatile size_t _hwm;          // High water mark within the reserved space.
 237   char _pad4[DEFAULT_CACHE_LINE_SIZE - sizeof(size_t)];
 238 
 239   // Allocate a new chunk from the reserved memory, using the high water mark. Returns
 240   // NULL if out of memory.
 241   OopChunk* allocate_new_chunk();
 242 
 243   volatile bool _out_of_memory;
 244 
 245   // Atomically add the given chunk to the list.
 246   void add_chunk_to_list(OopChunk* volatile* list, OopChunk* elem);
 247   // Atomically remove and return a chunk from the given list. Returns NULL if the
 248   // list is empty.
 249   OopChunk* remove_chunk_from_list(OopChunk* volatile* list);
 250 
 251   void add_chunk_to_chunk_list(OopChunk* elem);
 252   void add_chunk_to_free_list(OopChunk* elem);
 253 
 254   OopChunk* remove_chunk_from_chunk_list();
 255   OopChunk* remove_chunk_from_free_list();
 256 
 257   bool  _should_expand;
 258 
 259   // Resizes the mark stack to the given new capacity. Releases any previous
 260   // memory if successful.
 261   bool resize(size_t new_capacity);
 262 
 263  public:
 264   G1CMMarkStack();
 265   ~G1CMMarkStack();
 266 
 267   // Alignment and minimum capacity of this mark stack in number of oops.
 268   static size_t capacity_alignment();
 269 
 270   // Allocate and initialize the mark stack with the given number of oops.
 271   bool initialize(size_t initial_capacity, size_t max_capacity);
 272 
 273   // Pushes the given buffer containing at most OopsPerChunk elements onto the mark
 274   // stack. If fewer than OopsPerChunk elements are to be pushed, the array must
 275   // be terminated with a NULL.
 276   // Returns whether the buffer contents were successfully pushed to the global mark
 277   // stack.
 278   bool par_push_chunk(G1TaskQueueEntry* buffer);
 279 
 280   // Pops a chunk from this mark stack, copying its entries into the given buffer.
 281   // The chunk may contain up to OopsPerChunk elements. If there are fewer, the last
 282   // element in the array is a NULL pointer.
 283   bool par_pop_chunk(G1TaskQueueEntry* buffer);
 284 
 285   // Return whether the chunk list is empty. Racy due to unsynchronized access to
 286   // _chunk_list.
 287   bool is_empty() const { return _chunk_list == NULL; }
 288 
 289   size_t capacity() const  { return _chunk_capacity; }
 290 
 291   bool is_out_of_memory() const { return _out_of_memory; }
 292   void clear_out_of_memory() { _out_of_memory = false; }
 293 
 294   bool should_expand() const { return _should_expand; }
 295   void set_should_expand(bool value) { _should_expand = value; }
 296 
 297   // Expand the stack, typically in response to an overflow condition
 298   void expand();
 299 
 300   // Return the approximate number of oops on this mark stack. Racy due to
 301   // unsynchronized access to _chunks_in_chunk_list.
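
The add_chunk_to_list/remove_chunk_from_list pair above has to be lock-free, since
multiple marking threads push and pop chunks concurrently. A sketch of the
compare-and-swap loop such a push amounts to, written against std::atomic purely for
illustration (HotSpot uses its own Atomic wrappers, not the standard library):

    #include <atomic>

    struct Chunk { Chunk* next; };

    // Push elem as the new head of a lock-free singly linked list.
    void push_chunk(std::atomic<Chunk*>& list, Chunk* elem) {
      Chunk* old_head = list.load(std::memory_order_relaxed);
      do {
        elem->next = old_head;   // link the candidate head to the current head
      } while (!list.compare_exchange_weak(old_head, elem,
                                           std::memory_order_release,
                                           std::memory_order_relaxed));
    }

The matching pop removes the head with the same kind of loop, returning NULL on an
empty list; unlike the push, a naive CAS pop is exposed to the ABA problem, which a
production implementation has to account for.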


 859   // an expensive operation
 860   void decrease_limits();
 861   // it checks whether the words scanned or refs visited reached their
 862   // respective limit and calls reached_limit() if they have
 863   void check_limits() {
 864     if (_words_scanned >= _words_scanned_limit ||
 865         _refs_reached >= _refs_reached_limit) {
 866       reached_limit();
 867     }
 868   }
 869   // this is supposed to be called regularly during a marking step as
 870   // it checks a bunch of conditions that might cause the marking step
 871   // to abort
 872   void regular_clock_call();
 873   bool concurrent() { return _concurrent; }
 874 
 875   // Test whether obj might have already been passed over by the
 876   // mark bitmap scan, and so needs to be pushed onto the mark stack.
 877   bool is_below_finger(oop obj, HeapWord* global_finger) const;
 878 
 879   template<bool scan> void process_grey_object(G1TaskQueueEntry task_entry);
 880 public:
 881   // Apply the closure on the given area of the objArray. Return the number of words
 882   // scanned.
 883   inline size_t scan_objArray(objArrayOop obj, MemRegion mr);
 884   // It resets the task; it should be called right at the beginning of
 885   // a marking phase.
 886   void reset(G1CMBitMap* _nextMarkBitMap);
 887   // it clears all the fields that correspond to a claimed region.
 888   void clear_region_fields();
 889 
 890   void set_concurrent(bool concurrent) { _concurrent = concurrent; }
 891 
 892   // The main method of this class which performs a marking step
 893   // trying not to exceed the given duration. However, it might exit
 894   // prematurely, according to some conditions (e.g. SATB buffers are
 895   // available for processing).
 896   void do_marking_step(double target_ms,
 897                        bool do_termination,
 898                        bool is_serial);
 899 


 924   void clear_has_aborted()      { _has_aborted = false; }
 925   bool has_timed_out()          { return _has_timed_out; }
 926   bool claimed()                { return _claimed; }
 927 
 928   void set_cm_oop_closure(G1CMOopClosure* cm_oop_closure);
 929 
 930   // Increment the number of references this task has visited.
 931   void increment_refs_reached() { ++_refs_reached; }
 932 
 933   // Grey the object by marking it.  If not already marked, push it on
 934   // the local queue if below the finger.
 935   // obj is below its region's NTAMS.
 936   inline void make_reference_grey(oop obj);
 937 
 938   // Grey the object (by calling make_reference_grey) if required,
 939   // e.g. obj is below its containing region's NTAMS.
 940   // Precondition: obj is a valid heap object.
 941   inline void deal_with_reference(oop obj);
 942 
 943   // It scans an object and visits its children.
 944   inline void scan_object(G1TaskQueueEntry task_entry);
 945 
 946   // It pushes an object on the local queue.
 947   inline void push(G1TaskQueueEntry task_entry);
 948 
 949   // Move entries to the global stack.
 950   void move_entries_to_global_stack();
 951   // Move entries from the global stack, returning true if we were successful.
 952   bool get_entries_from_global_stack();
 953 
 954   // It pops and scans objects from the local queue. If partially is
 955   // true, then it stops when the queue size drops to a given limit. If
 956   // partially is false, then it stops when the queue is empty.
 957   void drain_local_queue(bool partially);
 958   // It moves entries from the global stack to the local queue and
 959   // drains the local queue. If partially is true, then it stops when
 960   // both the global stack and the local queue reach a given size. If
 961   // partially is false, it tries to empty them completely.
 962   void drain_global_stack(bool partially);
 963   // It keeps picking SATB buffers and processing them until no SATB
 964   // buffers are available.




  35 class G1CMBitMap;
  36 class G1CMTask;
  37 class G1ConcurrentMark;
  38 class ConcurrentGCTimer;
  39 class G1OldTracer;
  40 class G1SurvivorRegions;
  41 
  42 #ifdef _MSC_VER
  43 #pragma warning(push)
  44 // warning C4522: multiple assignment operators specified
  45 #pragma warning(disable:4522)
  46 #endif
  47 
  48 // Container class for a mark stack entry: either an oop or a continuation
  49 // address within an objArray ("array slice"). Both kinds are pushed onto the mark stack.
  50 class G1TaskQueueEntry VALUE_OBJ_CLASS_SPEC {
  51 private:
  52   void* _holder;
  53 
  54   static const uintptr_t ArraySliceBit = 1;
  55 
  56   G1TaskQueueEntry(oop obj) : _holder(obj) {
  57     assert(_holder != NULL, "Not allowed to set NULL task queue element");
  58   }
  59   G1TaskQueueEntry(HeapWord* addr) : _holder((void*)((uintptr_t)addr | ArraySliceBit)) { }
  60 public:
  61   G1TaskQueueEntry(const G1TaskQueueEntry& other) { _holder = other._holder; }
  62   G1TaskQueueEntry() : _holder(NULL) { }
  63 
  64   static G1TaskQueueEntry from_slice(HeapWord* what) { return G1TaskQueueEntry(what); }
  65   static G1TaskQueueEntry from_oop(oop obj) { return G1TaskQueueEntry(obj); }
  66 
  67   G1TaskQueueEntry& operator=(const G1TaskQueueEntry& t) {
  68     _holder = t._holder;
  69     return *this;
  70   }
  71 
  72   volatile G1TaskQueueEntry& operator=(const volatile G1TaskQueueEntry& t) volatile {
  73     _holder = t._holder;
  74     return *this;
  75   }
  76 
  77   oop obj() const {
  78     assert(!is_array_slice(), "Trying to read array slice " PTR_FORMAT " as oop", p2i(_holder));
  79     return (oop)_holder;
  80   }
  81 
  82   HeapWord* slice() const {
  83     assert(is_array_slice(), "Trying to read oop " PTR_FORMAT " as array slice", p2i(_holder));
  84     return (HeapWord*)((uintptr_t)_holder & ~ArraySliceBit);
  85   }
  86 
  87   bool is_oop() const { return !is_array_slice(); }
  88   bool is_array_slice() const { return ((uintptr_t)_holder & ArraySliceBit) != 0; }
  89   bool is_null() const { return _holder == NULL; }
  90 };
  91 
  92 #ifdef _MSC_VER
  93 #pragma warning(pop)
  94 #endif
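
With the constructors now private, entries can only be created through the from_oop
and from_slice factories, and consumers dispatch on the tag before extracting the
payload. A hedged usage fragment against the class above; the surrounding marking
logic is elided:

    // Producing entries:
    //   G1TaskQueueEntry e = G1TaskQueueEntry::from_oop(obj);           // plain object
    //   G1TaskQueueEntry s = G1TaskQueueEntry::from_slice(slice_addr);  // objArray continuation

    void process(G1TaskQueueEntry entry) {
      if (entry.is_array_slice()) {
        HeapWord* addr = entry.slice();   // asserts the tag bit is set
        // ... continue scanning the large objArray from addr ...
      } else {
        oop obj = entry.obj();            // asserts the tag bit is clear
        // ... mark obj and visit its fields ...
      }
    }

Hiding the converting constructors prevents accidental implicit conversions from oop
or HeapWord* at call sites; every construction is now explicit about which kind of
entry it makes.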
  95 
  96 typedef GenericTaskQueue<G1TaskQueueEntry, mtGC> G1CMTaskQueue;
  97 typedef GenericTaskQueueSet<G1CMTaskQueue, mtGC> G1CMTaskQueueSet;
  98 
  99 // Closure used by CM during concurrent reference discovery
 100 // and reference processing (during remarking) to determine
 101 // if a particular object is alive. It is primarily used
 102 // to determine if referents of discovered reference objects
 103 // are alive. An instance is also embedded into the
 104 // reference processor as the _is_alive_non_header field


 206 // Stores task queue entries in a huge buffer in virtual memory that is always fully committed.
 207 // Resizing may only happen during a STW pause when the stack is empty.
 208 //
 209 // Memory is allocated on a "chunk" basis, i.e. a set of entries. For this, the mark
 210 // stack memory is split into evenly sized chunks of entries. Users can only
 211 // add or remove entries on that basis.
 212 // Chunks are filled in increasing address order. Not completely filled chunks
 213 // are terminated by a NULL entry.
 214 //
 215 // Every chunk has a header containing a single pointer element used for memory
 216 // management. This wastes some space, but is negligible (< .1% with current sizing).
 217 //
 218 // Memory management is done using a mix of tracking a high water-mark indicating
 219 // that all chunks at a lower address are valid chunks, and a singly linked free
 220 // list connecting all empty chunks.
 221 class G1CMMarkStack VALUE_OBJ_CLASS_SPEC {
 222 public:
 223   // Number of task queue entries that can fit in a single chunk.
 224   static const size_t EntriesPerChunk = 1024 - 1 /* One reference for the next pointer */;
 225 private:
 226   struct TaskQueueEntryChunk {
 227     TaskQueueEntryChunk* next;
 228     G1TaskQueueEntry data[EntriesPerChunk];
 229   };
 230 
 231   size_t _max_chunk_capacity;    // Maximum number of TaskQueueEntryChunk elements on the stack.
 232 
 233   TaskQueueEntryChunk* _base;               // Bottom address of allocated memory area.
 234   size_t _chunk_capacity;        // Current maximum number of TaskQueueEntryChunk elements.
 235 
 236   char _pad0[DEFAULT_CACHE_LINE_SIZE];
 237   TaskQueueEntryChunk* volatile _free_list;  // Linked list of free chunks that can be allocated by users.
 238   char _pad1[DEFAULT_CACHE_LINE_SIZE - sizeof(TaskQueueEntryChunk*)];
 239   TaskQueueEntryChunk* volatile _chunk_list; // List of chunks currently containing data.
 240   volatile size_t _chunks_in_chunk_list;
 241   char _pad2[DEFAULT_CACHE_LINE_SIZE - sizeof(TaskQueueEntryChunk*) - sizeof(size_t)];
 242 
 243   volatile size_t _hwm;          // High water mark within the reserved space.
 244   char _pad4[DEFAULT_CACHE_LINE_SIZE - sizeof(size_t)];
 245 
 246   // Allocate a new chunk from the reserved memory, using the high water mark. Returns
 247   // NULL if out of memory.
 248   TaskQueueEntryChunk* allocate_new_chunk();
 249 
 250   volatile bool _out_of_memory;
 251 
 252   // Atomically add the given chunk to the list.
 253   void add_chunk_to_list(TaskQueueEntryChunk* volatile* list, TaskQueueEntryChunk* elem);
 254   // Atomically remove and return a chunk from the given list. Returns NULL if the
 255   // list is empty.
 256   TaskQueueEntryChunk* remove_chunk_from_list(TaskQueueEntryChunk* volatile* list);
 257 
 258   void add_chunk_to_chunk_list(TaskQueueEntryChunk* elem);
 259   void add_chunk_to_free_list(TaskQueueEntryChunk* elem);
 260 
 261   TaskQueueEntryChunk* remove_chunk_from_chunk_list();
 262   TaskQueueEntryChunk* remove_chunk_from_free_list();
 263 
 264   bool  _should_expand;
 265 
 266   // Resizes the mark stack to the given new capacity. Releases any previous
 267   // memory if successful.
 268   bool resize(size_t new_capacity);
 269 
 270  public:
 271   G1CMMarkStack();
 272   ~G1CMMarkStack();
 273 
 274   // Alignment and minimum capacity of this mark stack in number of task queue entries.
 275   static size_t capacity_alignment();
 276 
 277   // Allocate and initialize the mark stack with the given number of entries.
 278   bool initialize(size_t initial_capacity, size_t max_capacity);
 279 
 280   // Pushes the given buffer containing at most EntriesPerChunk elements onto the mark
 281   // stack. If fewer than EntriesPerChunk elements are to be pushed, the array must
 282   // be terminated with a NULL.
 283   // Returns whether the buffer contents were successfully pushed to the global mark
 284   // stack.
 285   bool par_push_chunk(G1TaskQueueEntry* buffer);
 286 
 287   // Pops a chunk from this mark stack, copying its entries into the given buffer.
 288   // The chunk may contain up to EntriesPerChunk elements. If there are fewer, the last
 289   // element in the array is a NULL pointer.
 290   bool par_pop_chunk(G1TaskQueueEntry* buffer);
 291 
 292   // Return whether the chunk list is empty. Racy due to unsynchronized access to
 293   // _chunk_list.
 294   bool is_empty() const { return _chunk_list == NULL; }
 295 
 296   size_t capacity() const  { return _chunk_capacity; }
 297 
 298   bool is_out_of_memory() const { return _out_of_memory; }
 299   void clear_out_of_memory() { _out_of_memory = false; }
 300 
 301   bool should_expand() const { return _should_expand; }
 302   void set_should_expand(bool value) { _should_expand = value; }
 303 
 304   // Expand the stack, typically in response to an overflow condition
 305   void expand();
 306 
 307   // Return the approximate number of entries on this mark stack. Racy due to
 308   // unsynchronized access to _chunks_in_chunk_list.
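
The 1024 - 1 sizing makes a chunk exactly 1024 words: one word for the next pointer
plus 1023 entry words, so the management header costs 1/1024 of the space, consistent
with the "< .1%" note above. A standalone check of that arithmetic, assuming 64-bit
pointers and no struct padding:

    #include <cstddef>

    struct Entry { void* holder; };                  // stand-in for G1TaskQueueEntry
    static const size_t EntriesPerChunk = 1024 - 1;  // leave room for the next pointer

    struct Chunk {
      Chunk* next;                                   // memory-management header
      Entry  data[EntriesPerChunk];
    };

    static_assert(sizeof(Chunk) == 1024 * sizeof(void*),
                  "header plus 1023 entries occupy exactly 1024 words");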


 866   // an expensive operation
 867   void decrease_limits();
 868   // it checks whether the words scanned or refs visited reached their
 869   // respective limit and calls reached_limit() if they have
 870   void check_limits() {
 871     if (_words_scanned >= _words_scanned_limit ||
 872         _refs_reached >= _refs_reached_limit) {
 873       reached_limit();
 874     }
 875   }
 876   // this is supposed to be called regularly during a marking step as
 877   // it checks a bunch of conditions that might cause the marking step
 878   // to abort
 879   void regular_clock_call();
 880   bool concurrent() { return _concurrent; }
 881 
 882   // Test whether obj might have already been passed over by the
 883   // mark bitmap scan, and so needs to be pushed onto the mark stack.
 884   bool is_below_finger(oop obj, HeapWord* global_finger) const;
 885 
 886   template<bool scan> void process_grey_task_entry(G1TaskQueueEntry task_entry);
 887 public:
 888   // Apply the closure on the given area of the objArray. Return the number of words
 889   // scanned.
 890   inline size_t scan_objArray(objArrayOop obj, MemRegion mr);
 891   // It resets the task; it should be called right at the beginning of
 892   // a marking phase.
 893   void reset(G1CMBitMap* _nextMarkBitMap);
 894   // it clears all the fields that correspond to a claimed region.
 895   void clear_region_fields();
 896 
 897   void set_concurrent(bool concurrent) { _concurrent = concurrent; }
 898 
 899   // The main method of this class which performs a marking step
 900   // trying not to exceed the given duration. However, it might exit
 901   // prematurely, according to some conditions (e.g. SATB buffers are
 902   // available for processing).
 903   void do_marking_step(double target_ms,
 904                        bool do_termination,
 905                        bool is_serial);
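
A schematic of how a worker might drive do_marking_step; everything here other than
do_marking_step, has_aborted, has_timed_out and clear_has_aborted is an assumption
for illustration, not code from this patch:

    void run_marking(G1CMTask* task) {
      task->do_marking_step(10.0 /* target_ms */, true /* do_termination */, false /* is_serial */);
      while (task->has_aborted() && !task->has_timed_out()) {
        // Retried only after the cause of the abort (e.g. a mark stack
        // overflow) has been handled elsewhere.
        task->clear_has_aborted();
        task->do_marking_step(10.0, true, false);
      }
    }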
 906 


 931   void clear_has_aborted()      { _has_aborted = false; }
 932   bool has_timed_out()          { return _has_timed_out; }
 933   bool claimed()                { return _claimed; }
 934 
 935   void set_cm_oop_closure(G1CMOopClosure* cm_oop_closure);
 936 
 937   // Increment the number of references this task has visited.
 938   void increment_refs_reached() { ++_refs_reached; }
 939 
 940   // Grey the object by marking it.  If not already marked, push it on
 941   // the local queue if below the finger.
 942   // obj is below its region's NTAMS.
 943   inline void make_reference_grey(oop obj);
 944 
 945   // Grey the object (by calling make_reference_grey) if required,
 946   // e.g. obj is below its containing region's NTAMS.
 947   // Precondition: obj is a valid heap object.
 948   inline void deal_with_reference(oop obj);
 949 
 950   // It scans an object and visits its children.
 951   inline void scan_task_entry(G1TaskQueueEntry task_entry);
 952 
 953   // It pushes an object on the local queue.
 954   inline void push(G1TaskQueueEntry task_entry);
 955 
 956   // Move entries to the global stack.
 957   void move_entries_to_global_stack();
 958   // Move entries from the global stack, returning true if we were successful.
 959   bool get_entries_from_global_stack();
 960 
 961   // It pops and scans objects from the local queue. If partially is
 962   // true, then it stops when the queue size drops to a given limit. If
 963   // partially is false, then it stops when the queue is empty.
 964   void drain_local_queue(bool partially);
 965   // It moves entries from the global stack to the local queue and
 966   // drains the local queue. If partially is true, then it stops when
 967   // both the global stack and the local queue reach a given size. If
 968   // partially is false, it tries to empty them completely.
 969   void drain_global_stack(bool partially);
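
A sketch of the control flow the drain_local_queue comment describes; the threshold
value and the pop_local call are assumptions for illustration:

    void drain_local_queue_sketch(bool partially) {
      size_t target_size = partially ? 32 : 0;   // assumed partial-drain threshold
      G1TaskQueueEntry entry;
      while (_task_queue->size() > target_size && _task_queue->pop_local(entry)) {
        scan_task_entry(entry);   // scan the popped object or array slice
      }
    }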
 970   // It keeps picking SATB buffers and processing them until no SATB
 971   // buffers are available.

