// Return whether the chunk list is empty. Racy due to unsynchronized access to
// _chunk_list.
bool is_empty() const { return _chunk_list == NULL; }

// Total capacity of the stack, in chunks.
size_t capacity() const { return _chunk_capacity; }

// Expand the stack, typically in response to an overflow condition
void expand();

// Return the approximate number of oops on this mark stack. Racy due to
// unsynchronized access to _chunks_in_chunk_list.
size_t size() const { return _chunks_in_chunk_list * EntriesPerChunk; }

// Logically discard all entries on the stack.
void set_empty();

// Apply Fn to every oop on the mark stack. The mark stack must not
// be modified while iterating. Debug/verification only (PRODUCT_RETURN:
// compiled out in product builds).
template<typename Fn> void iterate(Fn fn) const PRODUCT_RETURN;
};
224
// Root Regions are regions that contain objects from nTAMS to top. These are roots
// for marking, i.e. their referenced objects must be kept alive to maintain the
// SATB invariant.
// We could scan and mark them through during the initial-mark pause, but for
// pause time reasons we move this work to the concurrent phase.
// We need to complete this procedure before the next GC because it might determine
// that some of these "root objects" are dead, potentially dropping some required
// references.
// Root regions comprise the complete contents of survivor regions, and any
// objects copied into old gen during GC.
class G1CMRootRegions {
  HeapRegion** _root_regions;            // Array of root regions; sized by _max_regions (see constructor).
  size_t const _max_regions;

  volatile size_t _num_root_regions;     // Actual number of root regions.

  volatile size_t _claimed_root_regions; // Number of root regions currently claimed.

  volatile bool _scan_in_progress;
  volatile bool _should_abort;

  // Signal that root region scanning is done; presumably wakes threads blocked
  // in wait_until_scan_finished() (definition not in view).
  void notify_scan_done();

public:
  G1CMRootRegions(uint const max_regions);
  ~G1CMRootRegions();

  // Reset the data structure to allow addition of new root regions.
  void reset();

  // Add hr to the set of root regions to be scanned.
  void add(HeapRegion* hr);

  // Reset the claiming / scanning of the root regions.
  void prepare_for_scan();

  // Forces claim_next() to return NULL so that the iteration aborts early.
  void abort() { _should_abort = true; }

  // Return true if CM threads are actively scanning root regions,
  // false otherwise.
  bool scan_in_progress() { return _scan_in_progress; }

  // Claim the next root region to scan atomically, or return NULL if
  // all have been claimed.
  HeapRegion* claim_next();

  // The number of root regions to scan.
  uint num_root_regions() const;

  // Abandon the current scan without completing it.
  void cancel_scan();

  // Flag that we're done with root region scanning and notify anyone
  // who's waiting on it. If aborted is false, assume that all regions
  // have been claimed.
  void scan_finished();

  // If CM threads are still scanning root regions, wait until they
  // are done. Return true if we had to wait, false otherwise.
  bool wait_until_scan_finished();
};
285
286 // This class manages data structures and methods for doing liveness analysis in
287 // G1's concurrent cycle.
288 class G1ConcurrentMark : public CHeapObj<mtGC> {
289 friend class G1ConcurrentMarkThread;
293 friend class G1CMDrainMarkingStackClosure;
294 friend class G1CMBitMapClosure;
295 friend class G1CMConcurrentMarkingTask;
296 friend class G1CMRemarkTask;
297 friend class G1CMTask;
298
299 G1ConcurrentMarkThread* _cm_thread; // The thread doing the work
300 G1CollectedHeap* _g1h; // The heap
301 bool _completed_initialization; // Set to true when initialization is complete
302
303 // Concurrent marking support structures
304 G1CMBitMap _mark_bitmap_1;
305 G1CMBitMap _mark_bitmap_2;
306 G1CMBitMap* _prev_mark_bitmap; // Completed mark bitmap
307 G1CMBitMap* _next_mark_bitmap; // Under-construction mark bitmap
308
309 // Heap bounds
310 MemRegion const _heap;
311
312 // Root region tracking and claiming
313 G1CMRootRegions _root_regions;
314
315 // For grey objects
316 G1CMMarkStack _global_mark_stack; // Grey objects behind global finger
317 HeapWord* volatile _finger; // The global finger, region aligned,
318 // always pointing to the end of the
319 // last claimed region
320
321 uint _worker_id_offset;
322 uint _max_num_tasks; // Maximum number of marking tasks
323 uint _num_active_tasks; // Number of tasks currently active
324 G1CMTask** _tasks; // Task queue array (max_worker_id length)
325
326 G1CMTaskQueueSet* _task_queues; // Task queue set
327 TaskTerminator _terminator; // For termination
328
329 // Two sync barriers that are used to synchronize tasks when an
330 // overflow occurs. The algorithm is the following. All tasks enter
331 // the first one to ensure that they have all stopped manipulating
332 // the global data structures. After they exit it, they re-initialize
333 // their data structures and task 0 re-initializes the global data
484 void clear_statistics_in_region(uint region_idx);
485 // Notification for eagerly reclaimed regions to clean up.
486 void humongous_object_eagerly_reclaimed(HeapRegion* r);
487 // Manipulation of the global mark stack.
488 // The push and pop operations are used by tasks for transfers
489 // between task-local queues and the global mark stack.
490 bool mark_stack_push(G1TaskQueueEntry* arr) {
491 if (!_global_mark_stack.par_push_chunk(arr)) {
492 set_has_overflown();
493 return false;
494 }
495 return true;
496 }
// Pop a chunk of entries from the global mark stack into arr; returns whether
// a chunk was transferred (presumably false when the stack is empty —
// par_pop_chunk is not in view).
bool mark_stack_pop(G1TaskQueueEntry* arr) {
  return _global_mark_stack.par_pop_chunk(arr);
}
// Approximate number of entries on the global mark stack; racy (see G1CMMarkStack::size()).
size_t mark_stack_size() const { return _global_mark_stack.size(); }
// One third of the global mark stack's capacity.
size_t partial_mark_stack_size_target() const { return _global_mark_stack.capacity() / 3; }
// Whether the global mark stack is empty; racy (see G1CMMarkStack::is_empty()).
bool mark_stack_empty() const { return _global_mark_stack.is_empty(); }
503
// Accessor for the root region tracking and claiming structure.
G1CMRootRegions* root_regions() { return &_root_regions; }
505
506 void concurrent_cycle_start();
507 // Abandon current marking iteration due to a Full GC.
508 void concurrent_cycle_abort();
509 void concurrent_cycle_end();
510
// Add vtime to the accumulated virtual (CPU) time of task i.
void update_accum_task_vtime(int i, double vtime) {
  _accum_task_vtime[i] += vtime;
}
514
515 double all_task_accum_vtime() {
516 double ret = 0.0;
517 for (uint i = 0; i < _max_num_tasks; ++i)
518 ret += _accum_task_vtime[i];
519 return ret;
520 }
521
522 // Attempts to steal an object from the task queues of other tasks
523 bool try_stealing(uint worker_id, G1TaskQueueEntry& task_entry);
524
537
538 // Moves all per-task cached data into global state.
539 void flush_all_task_caches();
540 // Prepare internal data structures for the next mark cycle. This includes clearing
541 // the next mark bitmap and some internal data structures. This method is intended
542 // to be called concurrently to the mutator. It will yield to safepoint requests.
543 void cleanup_for_next_mark();
544
545 // Clear the previous marking bitmap during safepoint.
546 void clear_prev_bitmap(WorkGang* workers);
547
548 // These two methods do the work that needs to be done at the start and end of the
549 // initial mark pause.
550 void pre_initial_mark();
551 void post_initial_mark();
552
553 // Scan all the root regions and mark everything reachable from
554 // them.
555 void scan_root_regions();
556
557 // Scan a single root region from nTAMS to top and mark everything reachable from it.
558 void scan_root_region(HeapRegion* hr, uint worker_id);
559
560 // Do concurrent phase of marking, to a tentative transitive closure.
561 void mark_from_roots();
562
563 // Do concurrent preclean work.
564 void preclean();
565
566 void remark();
567
568 void cleanup();
569 // Mark in the previous bitmap. Caution: the prev bitmap is usually read-only, so use
570 // this carefully.
571 inline void mark_in_prev_bitmap(oop p);
572
573 // Clears marks for all objects in the given range, for the prev or
574 // next bitmaps. Caution: the previous bitmap is usually
575 // read-only, so use this carefully!
576 void clear_range_in_prev_bitmap(MemRegion mr);
577
578 inline bool is_marked_in_prev_bitmap(oop p) const;
|
// Return whether the chunk list is empty. Racy due to unsynchronized access to
// _chunk_list.
bool is_empty() const { return _chunk_list == NULL; }

// Total capacity of the stack, in chunks.
size_t capacity() const { return _chunk_capacity; }

// Expand the stack, typically in response to an overflow condition
void expand();

// Return the approximate number of oops on this mark stack. Racy due to
// unsynchronized access to _chunks_in_chunk_list.
size_t size() const { return _chunks_in_chunk_list * EntriesPerChunk; }

// Logically discard all entries on the stack.
void set_empty();

// Apply Fn to every oop on the mark stack. The mark stack must not
// be modified while iterating. Debug/verification only (PRODUCT_RETURN:
// compiled out in product builds).
template<typename Fn> void iterate(Fn fn) const PRODUCT_RETURN;
};
224
// Root MemRegions are memory regions that contain objects from nTAMS to top. These are roots
// for marking, i.e. their referenced objects must be kept alive to maintain the
// SATB invariant.
// We could scan and mark them through during the initial-mark pause, but for
// pause time reasons we move this work to the concurrent phase.
// We need to complete this procedure before the next GC because it might determine
// that some of these "root objects" are dead, potentially dropping some required
// references.
// Root MemRegions comprise the complete contents of survivor regions, and any
// objects copied into old gen during GC.
class G1CMRootMemRegions {
  // The set of root MemRegions. We store the corresponding nTAMS and top() pointer
  // for a given heap region into MemRegion to know the exact area to scan because
  // the top of a reused survivor region changes over time.
  MemRegion* _root_regions;
  size_t const _max_regions;

  volatile size_t _num_root_regions; // Actual number of root regions.

  volatile size_t _claimed_root_regions; // Number of root regions currently claimed.

  volatile bool _scan_in_progress;
  volatile bool _should_abort;

  // Signal that root region scanning is done; presumably wakes threads blocked
  // in wait_until_scan_finished() (definition not in view).
  void notify_scan_done();

public:
  G1CMRootMemRegions(uint const max_regions);
  ~G1CMRootMemRegions();

  // Reset the data structure to allow addition of new root regions.
  void reset();

  // Add the memory area [start, end) to the set of root MemRegions to scan.
  void add(HeapWord* start, HeapWord* end);

  // Reset the claiming / scanning of the root regions.
  void prepare_for_scan();

  // Forces claim_next() to return NULL so that the iteration aborts early.
  void abort() { _should_abort = true; }

  // Return true if CM threads are actively scanning root regions,
  // false otherwise.
  bool scan_in_progress() { return _scan_in_progress; }

  // Claim the next root MemRegion to scan atomically, or return NULL if
  // all have been claimed.
  MemRegion* claim_next();

  // The number of root regions to scan.
  uint num_root_regions() const;

  // Abandon the current scan without completing it.
  void cancel_scan();

  // Flag that we're done with root region scanning and notify anyone
  // who's waiting on it. If aborted is false, assume that all regions
  // have been claimed.
  void scan_finished();

  // If CM threads are still scanning root regions, wait until they
  // are done. Return true if we had to wait, false otherwise.
  bool wait_until_scan_finished();
};
288
289 // This class manages data structures and methods for doing liveness analysis in
290 // G1's concurrent cycle.
291 class G1ConcurrentMark : public CHeapObj<mtGC> {
292 friend class G1ConcurrentMarkThread;
296 friend class G1CMDrainMarkingStackClosure;
297 friend class G1CMBitMapClosure;
298 friend class G1CMConcurrentMarkingTask;
299 friend class G1CMRemarkTask;
300 friend class G1CMTask;
301
302 G1ConcurrentMarkThread* _cm_thread; // The thread doing the work
303 G1CollectedHeap* _g1h; // The heap
304 bool _completed_initialization; // Set to true when initialization is complete
305
306 // Concurrent marking support structures
307 G1CMBitMap _mark_bitmap_1;
308 G1CMBitMap _mark_bitmap_2;
309 G1CMBitMap* _prev_mark_bitmap; // Completed mark bitmap
310 G1CMBitMap* _next_mark_bitmap; // Under-construction mark bitmap
311
312 // Heap bounds
313 MemRegion const _heap;
314
315 // Root region tracking and claiming
316 G1CMRootMemRegions _root_regions;
317
318 // For grey objects
319 G1CMMarkStack _global_mark_stack; // Grey objects behind global finger
320 HeapWord* volatile _finger; // The global finger, region aligned,
321 // always pointing to the end of the
322 // last claimed region
323
324 uint _worker_id_offset;
325 uint _max_num_tasks; // Maximum number of marking tasks
326 uint _num_active_tasks; // Number of tasks currently active
327 G1CMTask** _tasks; // Task queue array (max_worker_id length)
328
329 G1CMTaskQueueSet* _task_queues; // Task queue set
330 TaskTerminator _terminator; // For termination
331
332 // Two sync barriers that are used to synchronize tasks when an
333 // overflow occurs. The algorithm is the following. All tasks enter
334 // the first one to ensure that they have all stopped manipulating
335 // the global data structures. After they exit it, they re-initialize
336 // their data structures and task 0 re-initializes the global data
487 void clear_statistics_in_region(uint region_idx);
488 // Notification for eagerly reclaimed regions to clean up.
489 void humongous_object_eagerly_reclaimed(HeapRegion* r);
490 // Manipulation of the global mark stack.
491 // The push and pop operations are used by tasks for transfers
492 // between task-local queues and the global mark stack.
493 bool mark_stack_push(G1TaskQueueEntry* arr) {
494 if (!_global_mark_stack.par_push_chunk(arr)) {
495 set_has_overflown();
496 return false;
497 }
498 return true;
499 }
// Pop a chunk of entries from the global mark stack into arr; returns whether
// a chunk was transferred (presumably false when the stack is empty —
// par_pop_chunk is not in view).
bool mark_stack_pop(G1TaskQueueEntry* arr) {
  return _global_mark_stack.par_pop_chunk(arr);
}
// Approximate number of entries on the global mark stack; racy (see G1CMMarkStack::size()).
size_t mark_stack_size() const { return _global_mark_stack.size(); }
// One third of the global mark stack's capacity.
size_t partial_mark_stack_size_target() const { return _global_mark_stack.capacity() / 3; }
// Whether the global mark stack is empty; racy (see G1CMMarkStack::is_empty()).
bool mark_stack_empty() const { return _global_mark_stack.is_empty(); }
506
// Accessor for the root MemRegion tracking and claiming structure.
G1CMRootMemRegions* root_regions() { return &_root_regions; }
508
509 void concurrent_cycle_start();
510 // Abandon current marking iteration due to a Full GC.
511 void concurrent_cycle_abort();
512 void concurrent_cycle_end();
513
// Add vtime to the accumulated virtual (CPU) time of task i.
void update_accum_task_vtime(int i, double vtime) {
  _accum_task_vtime[i] += vtime;
}
517
518 double all_task_accum_vtime() {
519 double ret = 0.0;
520 for (uint i = 0; i < _max_num_tasks; ++i)
521 ret += _accum_task_vtime[i];
522 return ret;
523 }
524
525 // Attempts to steal an object from the task queues of other tasks
526 bool try_stealing(uint worker_id, G1TaskQueueEntry& task_entry);
527
540
541 // Moves all per-task cached data into global state.
542 void flush_all_task_caches();
543 // Prepare internal data structures for the next mark cycle. This includes clearing
544 // the next mark bitmap and some internal data structures. This method is intended
545 // to be called concurrently to the mutator. It will yield to safepoint requests.
546 void cleanup_for_next_mark();
547
548 // Clear the previous marking bitmap during safepoint.
549 void clear_prev_bitmap(WorkGang* workers);
550
551 // These two methods do the work that needs to be done at the start and end of the
552 // initial mark pause.
553 void pre_initial_mark();
554 void post_initial_mark();
555
556 // Scan all the root regions and mark everything reachable from
557 // them.
558 void scan_root_regions();
559
560 // Scan a single root MemRegion which holds from nTAMS to top and mark everything reachable from it.
561 void scan_root_region(MemRegion* region, uint worker_id);
562
563 // Do concurrent phase of marking, to a tentative transitive closure.
564 void mark_from_roots();
565
566 // Do concurrent preclean work.
567 void preclean();
568
569 void remark();
570
571 void cleanup();
572 // Mark in the previous bitmap. Caution: the prev bitmap is usually read-only, so use
573 // this carefully.
574 inline void mark_in_prev_bitmap(oop p);
575
576 // Clears marks for all objects in the given range, for the prev or
577 // next bitmaps. Caution: the previous bitmap is usually
578 // read-only, so use this carefully!
579 void clear_range_in_prev_bitmap(MemRegion mr);
580
581 inline bool is_marked_in_prev_bitmap(oop p) const;
|