// Default constructor: bitmap granularity is G1's minimum object alignment;
// the listener is pointed back at this bitmap (set_bitmap(this)) so that
// events it receives can be applied here.
135 G1CMBitMap() : G1CMBitMapRO(LogMinObjAlignment), _listener() { _listener.set_bitmap(this); }
136
137 // Initializes the underlying BitMap to cover the given area.
138 void initialize(MemRegion heap, G1RegionToSpaceMapper* storage);
139
140 // Write marks.
141 inline void mark(HeapWord* addr);
142 inline void clear(HeapWord* addr);
// Parallel-safe variant of mark(); returns whether this caller set the bit.
// NOTE(review): inferred from the "par" prefix -- confirm against the inline
// definition, which is outside this excerpt.
143 inline bool parMark(HeapWord* addr);
144
// Clear all mark bits covering the given memory region.
145 void clear_range(MemRegion mr);
146 };
147
148 // Represents the overflow mark stack used by concurrent marking.
149 //
150 // Stores oops in a huge buffer in virtual memory that is always fully committed.
151 // Resizing may only happen during a STW pause when the stack is empty.
152 //
153 // Memory is allocated on a "chunk" basis, i.e. a set of oops. For this, the mark
154 // stack memory is split into evenly sized chunks of oops. Users can only
155 // add or remove entries on that basis.
156 // Chunks are filled in increasing address order. Not completely filled chunks
157 // have a NULL element as a terminating element.
158 //
159 // Every chunk has a header containing a single pointer element used for memory
160 // management. This wastes some space, but is negligible (< .1% with current sizing).
161 //
162 // Memory management is done using a mix of tracking a high water-mark indicating
163 // that all chunks at a lower address are valid chunks, and a singly linked free
164 // list connecting all empty chunks.
165 class G1CMMarkStack VALUE_OBJ_CLASS_SPEC {
166 public:
167 // Number of oops that can fit in a single chunk.
168 static const size_t OopsPerChunk = 1024 - 1 /* One reference for the next pointer */;
169 private:
170 struct OopChunk {
171 OopChunk* next;
172 oop data[OopsPerChunk];
173 };
174
175 size_t _max_chunk_capacity; // Maximum number of OopChunk elements on the stack.
176
177 OopChunk* _base; // Bottom address of allocated memory area.
178 size_t _chunk_capacity; // Current maximum number of OopChunk elements.
179
180 char _pad0[DEFAULT_CACHE_LINE_SIZE];
181 OopChunk* volatile _free_list; // Linked list of free chunks that can be allocated by users.
182 char _pad1[DEFAULT_CACHE_LINE_SIZE - sizeof(OopChunk*)];
183 OopChunk* volatile _chunk_list; // List of chunks currently containing data.
184 char _pad2[DEFAULT_CACHE_LINE_SIZE - sizeof(OopChunk*)];
185
186 size_t volatile _chunks_in_chunk_list;
187 char _pad3[DEFAULT_CACHE_LINE_SIZE - sizeof(size_t)];
188
189 volatile size_t _hwm; // High water mark within the reserved space.
190 char _pad4[DEFAULT_CACHE_LINE_SIZE - sizeof(size_t)];
191
192 // Allocate a new chunk from the reserved memory, using the high water mark. Returns
193 // NULL if out of memory.
194 OopChunk* allocate_new_chunk();
195
196 bool _out_of_memory;
197
198 // Atomically add the given chunk to the list.
199 void add_chunk_to_list(OopChunk* volatile* list, OopChunk* elem);
200 // Atomically remove and return a chunk from the given list. Returns NULL if the
201 // list is empty.
202 OopChunk* remove_chunk_from_list(OopChunk* volatile* list); bool _should_expand;
203
204 // Resizes the mark stack to the given new capacity. Releases any previous
205 // memory if successful.
206 bool resize(size_t new_capacity);
207
208 public:
209 G1CMMarkStack();
210 ~G1CMMarkStack();
211
212 // Alignment and minimum capacity of this mark stack in number of oops.
213 static size_t capacity_alignment();
214
215 // Allocate and initialize the mark stack with the given number of oops.
216 bool initialize(size_t initial_capacity, size_t max_capacity);
217
218 // Pushes the given buffer containing at most OopsPerChunk elements on the mark
219 // stack. If less than OopsPerChunk elements are to be pushed, the array must
220 // be terminated with a NULL.
221 void par_push_chunk(oop* buffer);
222
223 // Pops a chunk from this mark stack, copying them into the given buffer. This
224 // chunk may contain up to OopsPerChunk elements. If there are less, the last
225 // element in the array is a NULL pointer.
226 bool par_pop_chunk(oop* buffer);
227
228 bool is_empty() const { return _chunk_list == NULL && _chunks_in_chunk_list == 0; }
229
230 size_t capacity() const { return _chunk_capacity; }
231
232 bool is_out_of_memory() const { return _out_of_memory; }
233 void clear_out_of_memory() { _out_of_memory = false; }
234
235 bool should_expand() const { return _should_expand; }
236 void set_should_expand(bool value) { _should_expand = value; }
237
238 // Expand the stack, typically in response to an overflow condition
239 void expand();
240
241 // Return the approximate number of oops on this mark stack. Racy due to
242 // unsynchronized access to _chunks_in_chunk_list.
243 size_t size() const { return _chunks_in_chunk_list * OopsPerChunk; }
244
245 void set_empty();
246
247 // Apply Fn to every oop on the mark stack. The mark stack must not
248 // be modified while iterating.
249 template<typename Fn> void iterate(Fn fn) PRODUCT_RETURN;
250 };
251
252 // Root Regions are regions that are not empty at the beginning of a
253 // marking cycle and which we might collect during an evacuation pause
254 // while the cycle is active. Given that, during evacuation pauses, we
255 // do not copy objects that are explicitly marked, what we have to do
256 // for the root regions is to scan them and mark all objects reachable
257 // from them. According to the SATB assumptions, we only need to visit
258 // each object once during marking. So, as long as we finish this scan
259 // before the next evacuation pause, we can copy the objects from the
260 // root regions without having to mark them or do anything else to them.
261 //
262 // Currently, we only support root region scanning once (at the start
263 // of the marking cycle) and the root regions are all the survivor
264 // regions populated during the initial-mark pause.
265 class G1CMRootRegions VALUE_OBJ_CLASS_SPEC {
266 private:
267 const G1SurvivorRegions* _survivors;
268 G1ConcurrentMark* _cm;
269
// Blocks the calling marking worker at the second synchronization barrier.
// NOTE(review): presumably paired with a first-barrier counterpart declared
// outside this excerpt -- confirm.
504 void enter_second_sync_barrier(uint worker_id);
505
506 // Card index of the bottom of the G1 heap. Used for biasing indices into
507 // the card bitmaps.
508 intptr_t _heap_bottom_card_num;
509
510 // Set to true when initialization is complete.
511 bool _completed_initialization;
512
513 // end_timer, true to end gc timer after ending concurrent phase.
514 void register_concurrent_phase_end_common(bool end_timer);
515
516 // Clear the given bitmap in parallel using the given WorkGang. If may_yield is
517 // true, periodically insert checks to see if this method should exit prematurely.
518 void clear_bitmap(G1CMBitMap* bitmap, WorkGang* workers, bool may_yield);
519 public:
520 // Manipulation of the global mark stack.
521 // The push and pop operations are used by tasks for transfers
522 // between task-local queues and the global mark stack.
// Push a task-local buffer of up to OopsPerChunk oops onto the global mark
// stack. Records the overflow and returns false on failure.
// NOTE(review): is_out_of_memory() is a sticky, stack-wide flag -- a push that
// itself succeeded can still return false here if another thread ran out of
// memory first. Consider having par_push_chunk() report success directly.
523 bool mark_stack_push(oop* arr) {
524 _global_mark_stack.par_push_chunk(arr);
525 if (_global_mark_stack.is_out_of_memory()) {
526 set_has_overflown();
527 return false;
528 }
529 return true;
530 }
// Pop one chunk of up to OopsPerChunk oops into the given buffer; presumably
// returns false when the global mark stack is empty -- confirm against the
// par_pop_chunk() definition.
531 bool mark_stack_pop(oop* arr) {
532 return _global_mark_stack.par_pop_chunk(arr);
533 }
// Approximate number of oops on the global mark stack (racy; see G1CMMarkStack::size()).
534 size_t mark_stack_size() { return _global_mark_stack.size(); }
// One third of the current global mark stack capacity.
535 size_t partial_mark_stack_size_target() { return _global_mark_stack.capacity()/3; }
// Whether the global mark stack has run out of memory since the flag was last cleared.
536 bool mark_stack_overflow() { return _global_mark_stack.is_out_of_memory(); }
537 bool mark_stack_empty() { return _global_mark_stack.is_empty(); }
538
// Accessor for the root region (survivor) scanning state.
539 G1CMRootRegions* root_regions() { return &_root_regions; }
540
// True while a concurrent marking cycle is in progress.
541 bool concurrent_marking_in_progress() {
542 return _concurrent_marking_in_progress;
543 }
544 void set_concurrent_marking_in_progress() {
545 _concurrent_marking_in_progress = true;
|
// Default constructor: bitmap granularity is G1's minimum object alignment;
// the listener is pointed back at this bitmap (set_bitmap(this)) so that
// events it receives can be applied here.
135 G1CMBitMap() : G1CMBitMapRO(LogMinObjAlignment), _listener() { _listener.set_bitmap(this); }
136
137 // Initializes the underlying BitMap to cover the given area.
138 void initialize(MemRegion heap, G1RegionToSpaceMapper* storage);
139
140 // Write marks.
141 inline void mark(HeapWord* addr);
142 inline void clear(HeapWord* addr);
// Parallel-safe variant of mark(); returns whether this caller set the bit.
// NOTE(review): inferred from the "par" prefix -- confirm against the inline
// definition, which is outside this excerpt.
143 inline bool parMark(HeapWord* addr);
144
// Clear all mark bits covering the given memory region.
145 void clear_range(MemRegion mr);
146 };
147
148 // Represents the overflow mark stack used by concurrent marking.
149 //
150 // Stores oops in a huge buffer in virtual memory that is always fully committed.
151 // Resizing may only happen during a STW pause when the stack is empty.
152 //
153 // Memory is allocated on a "chunk" basis, i.e. a set of oops. For this, the mark
154 // stack memory is split into evenly sized chunks of oops. Users can only
155 // add or remove entries on that basis.
156 // Chunks are filled in increasing address order. Not completely filled chunks
157 // have a NULL element as a terminating element.
158 //
159 // Every chunk has a header containing a single pointer element used for memory
160 // management. This wastes some space, but is negligible (< .1% with current sizing).
161 //
162 // Memory management is done using a mix of tracking a high water-mark indicating
163 // that all chunks at a lower address are valid chunks, and a singly linked free
164 // list connecting all empty chunks.
165 class G1CMMarkStack VALUE_OBJ_CLASS_SPEC {
166 public:
167 // Number of oops that can fit in a single chunk.
168 static const size_t OopsPerChunk = 1024 - 1 /* One reference for the next pointer */;
169 private:
// A chunk: one header pointer for list linkage plus the oop payload.
170 struct OopChunk {
171 OopChunk* next;
172 oop data[OopsPerChunk];
173 };
174
175 size_t _max_chunk_capacity; // Maximum number of OopChunk elements on the stack.
176
177 OopChunk* _base; // Bottom address of allocated memory area.
178 size_t _chunk_capacity; // Current maximum number of OopChunk elements.
179
// The concurrently updated members below are padded onto separate cache lines
// to avoid false sharing between marking threads.
180 char _pad0[DEFAULT_CACHE_LINE_SIZE];
181 OopChunk* volatile _free_list; // Linked list of free chunks that can be allocated by users.
182 char _pad1[DEFAULT_CACHE_LINE_SIZE - sizeof(OopChunk*)];
183 OopChunk* volatile _chunk_list; // List of chunks currently containing data.
184 char _pad2[DEFAULT_CACHE_LINE_SIZE - sizeof(OopChunk*)];
185
186 volatile size_t _hwm; // High water mark within the reserved space.
187 char _pad4[DEFAULT_CACHE_LINE_SIZE - sizeof(size_t)];
188
189 // Allocate a new chunk from the reserved memory, using the high water mark. Returns
190 // NULL if out of memory.
191 OopChunk* allocate_new_chunk();
192
// Number of chunks on _chunk_list; updated independently of the list itself,
// so readers may observe transiently inconsistent values.
// NOTE(review): unlike _free_list/_chunk_list/_hwm above, this field and
// _out_of_memory carry no cache-line padding -- confirm false sharing here is
// acceptable.
193 volatile size_t _chunks_in_chunk_list;
194
// Sticky flag set on chunk allocation failure; shared between marking threads.
// Cleared via clear_out_of_memory().
195 volatile bool _out_of_memory;
196
197 // Atomically add the given chunk to the list.
198 void add_chunk_to_list(OopChunk* volatile* list, OopChunk* elem);
199 // Atomically remove and return a chunk from the given list. Returns NULL if the
200 // list is empty.
201 OopChunk* remove_chunk_from_list(OopChunk* volatile* list);
202
// List-specific wrappers for the generic helpers above; definitions are not
// visible in this excerpt (presumably they also maintain _chunks_in_chunk_list).
203 void add_chunk_to_chunk_list(OopChunk* elem);
204 void add_chunk_to_free_list(OopChunk* elem);
205
206 OopChunk* remove_chunk_from_chunk_list();
207 OopChunk* remove_chunk_from_free_list();
208
// Whether the stack should be expanded at the next opportunity (STW pause).
209 bool _should_expand;
210
211 // Resizes the mark stack to the given new capacity. Releases any previous
212 // memory if successful.
213 bool resize(size_t new_capacity);
214
215 public:
216 G1CMMarkStack();
217 ~G1CMMarkStack();
218
219 // Alignment and minimum capacity of this mark stack in number of oops.
220 static size_t capacity_alignment();
221
222 // Allocate and initialize the mark stack with the given number of oops.
223 bool initialize(size_t initial_capacity, size_t max_capacity);
224
225 // Pushes the given buffer containing at most OopsPerChunk elements on the mark
226 // stack. If less than OopsPerChunk elements are to be pushed, the array must
227 // be terminated with a NULL.
228 // Returns whether the buffer contents were successfully pushed to the global mark
229 // stack.
230 bool par_push_chunk(oop* buffer);
231
232 // Pops a chunk from this mark stack, copying its contents into the given buffer. This
233 // chunk may contain up to OopsPerChunk elements. If there are less, the last
234 // element in the array is a NULL pointer.
235 bool par_pop_chunk(oop* buffer);
236
237 // Return whether the chunk list is empty. Racy due to unsynchronized access to
238 // _chunk_list.
239 bool is_empty() const { return _chunk_list == NULL; }
240
241 size_t capacity() const { return _chunk_capacity; }
242
243 bool is_out_of_memory() const { return _out_of_memory; }
244 void clear_out_of_memory() { _out_of_memory = false; }
245
246 bool should_expand() const { return _should_expand; }
247 void set_should_expand(bool value) { _should_expand = value; }
248
249 // Expand the stack, typically in response to an overflow condition.
250 void expand();
251
252 // Return the approximate number of oops on this mark stack. Racy due to
253 // unsynchronized access to _chunks_in_chunk_list.
254 size_t size() const { return _chunks_in_chunk_list * OopsPerChunk; }
255
256 void set_empty();
257
258 // Apply Fn to every oop on the mark stack. The mark stack must not
259 // be modified while iterating.
260 template<typename Fn> void iterate(Fn fn) const PRODUCT_RETURN;
261 };
262
263 // Root Regions are regions that are not empty at the beginning of a
264 // marking cycle and which we might collect during an evacuation pause
265 // while the cycle is active. Given that, during evacuation pauses, we
266 // do not copy objects that are explicitly marked, what we have to do
267 // for the root regions is to scan them and mark all objects reachable
268 // from them. According to the SATB assumptions, we only need to visit
269 // each object once during marking. So, as long as we finish this scan
270 // before the next evacuation pause, we can copy the objects from the
271 // root regions without having to mark them or do anything else to them.
272 //
273 // Currently, we only support root region scanning once (at the start
274 // of the marking cycle) and the root regions are all the survivor
275 // regions populated during the initial-mark pause.
276 class G1CMRootRegions VALUE_OBJ_CLASS_SPEC {
277 private:
278 const G1SurvivorRegions* _survivors;
279 G1ConcurrentMark* _cm;
280
// Blocks the calling marking worker at the second synchronization barrier.
// NOTE(review): presumably paired with a first-barrier counterpart declared
// outside this excerpt -- confirm.
515 void enter_second_sync_barrier(uint worker_id);
516
517 // Card index of the bottom of the G1 heap. Used for biasing indices into
518 // the card bitmaps.
519 intptr_t _heap_bottom_card_num;
520
521 // Set to true when initialization is complete.
522 bool _completed_initialization;
523
524 // end_timer, true to end gc timer after ending concurrent phase.
525 void register_concurrent_phase_end_common(bool end_timer);
526
527 // Clear the given bitmap in parallel using the given WorkGang. If may_yield is
528 // true, periodically insert checks to see if this method should exit prematurely.
529 void clear_bitmap(G1CMBitMap* bitmap, WorkGang* workers, bool may_yield);
530 public:
531 // Manipulation of the global mark stack.
532 // The push and pop operations are used by tasks for transfers
533 // between task-local queues and the global mark stack.
// Push a task-local buffer of up to OopsPerChunk oops onto the global mark
// stack. Records the overflow and returns false if the push failed.
534 bool mark_stack_push(oop* arr) {
535 if (!_global_mark_stack.par_push_chunk(arr)) {
536 set_has_overflown();
537 return false;
538 }
539 return true;
540 }
// Pop one chunk of up to OopsPerChunk oops into the given buffer; presumably
// returns false when the global mark stack is empty -- confirm against the
// par_pop_chunk() definition.
541 bool mark_stack_pop(oop* arr) {
542 return _global_mark_stack.par_pop_chunk(arr);
543 }
// Approximate number of oops on the global mark stack (racy; see G1CMMarkStack::size()).
544 size_t mark_stack_size() { return _global_mark_stack.size(); }
// One third of the current global mark stack capacity.
545 size_t partial_mark_stack_size_target() { return _global_mark_stack.capacity()/3; }
// Whether the global mark stack has run out of memory since the flag was last cleared.
546 bool mark_stack_overflow() { return _global_mark_stack.is_out_of_memory(); }
547 bool mark_stack_empty() { return _global_mark_stack.is_empty(); }
548
// Accessor for the root region (survivor) scanning state.
549 G1CMRootRegions* root_regions() { return &_root_regions; }
550
// True while a concurrent marking cycle is in progress.
551 bool concurrent_marking_in_progress() {
552 return _concurrent_marking_in_progress;
553 }
554 void set_concurrent_marking_in_progress() {
555 _concurrent_marking_in_progress = true;
|