  size_t capacity() const { return _chunk_capacity; }

  // Expand the stack, typically in response to an overflow condition
  void expand();

  // Return the approximate number of oops on this mark stack. Racy due to
  // unsynchronized access to _chunks_in_chunk_list.
  size_t size() const { return _chunks_in_chunk_list * EntriesPerChunk; }

  void set_empty();

  // Apply Fn to every oop on the mark stack. The mark stack must not
  // be modified while iterating.
  template<typename Fn> void iterate(Fn fn) const PRODUCT_RETURN;
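  //
  // Usage sketch (illustrative only; iterate() is PRODUCT_RETURN, so it only
  // does work in non-product builds, and the entry type passed to the functor
  // is an assumption from context, not spelled out in this excerpt):
  //
  //   size_t entries = 0;
  //   mark_stack.iterate([&](G1TaskQueueEntry entry) { ++entries; });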
};

// Root MemRegions are memory areas that contain objects whose references are
// roots with respect to marking. They must be scanned before marking to maintain
// the SATB invariant.
// Typically they contain the areas from nTAMS to the top of the regions.
// We could scan and mark through these objects during the concurrent start pause,
// but for pause time reasons we move this work to the concurrent phase.
// We need to complete this procedure before the next GC because it might determine
// that some of these "root objects" are dead, potentially dropping some required
// references.
// Root MemRegions comprise the contents of survivor regions at the end
// of the GC, and any objects copied into the old gen during GC.
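//
// Illustrative sketch of how a marking worker might drain this set
// (claim_next() and scan_finished() are members assumed from context,
// not shown in this excerpt):
//
//   while (const MemRegion* mr = root_regions->claim_next()) {
//     scan_root_region(mr, worker_id);   // see G1ConcurrentMark below
//   }
//   root_regions->scan_finished();       // done; may notify waiters
//                                        // (see notify_scan_done())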
class G1CMRootMemRegions {
  // The set of root MemRegions.
  MemRegion* _root_regions;
  size_t const _max_regions;

  volatile size_t _num_root_regions; // Actual number of root regions.

  volatile size_t _claimed_root_regions; // Number of root regions currently claimed.

  volatile bool _scan_in_progress;
  volatile bool _should_abort;

  void notify_scan_done();

public:

  // ...

  void finalize_marking();

  void weak_refs_work_parallel_part(BoolObjectClosure* is_alive, bool purged_classes);
  void weak_refs_work(bool clear_all_soft_refs);

  void report_object_count(bool mark_completed);

  void swap_mark_bitmaps();

  void reclaim_empty_regions();

  // After reclaiming empty regions, update heap sizes.
  void compute_new_sizes();

  // Clear statistics gathered during the concurrent cycle for the given region after
  // it has been reclaimed.
  void clear_statistics(HeapRegion* r);

  // Resets the global marking data structures, as well as the
  // task local ones; should be called during concurrent start.
  void reset();

  // Resets all the marking data structures. Called when we have to restart
  // marking or when marking completes (via set_non_marking_state below).
  void reset_marking_for_restart();

  // We do this after we're done with marking so that the marking data
  // structures are initialized to a sensible and predictable state.
  void reset_at_marking_complete();

  // Called to indicate how many threads are currently active.
  void set_concurrency(uint active_tasks);

  // Should be called to indicate which phase we're in (concurrent
  // mark or remark) and how many threads are currently active.
  void set_concurrency_and_phase(uint active_tasks, bool concurrent);

  // Prints all gathered CM-related statistics.
  void print_stats();

  // ...

  // Claims the next available region to be scanned by a marking
  // task/thread. It might return NULL if the next region is empty or
  // we have run out of regions to scan, which might seem a bit
  // awkward. Originally, the code was written so that claim_region()
  // either successfully returned with a non-empty region or there
  // were no more regions to be claimed. The problem with this was
  // that, in certain circumstances, it iterated over large chunks of
  // the heap finding only empty regions and, while it was working, it
  // was preventing the calling task from calling its regular clock
  // method. So, this way, each task will spend very little time in
  // claim_region() and is allowed to call the regular clock method
  // frequently.
  HeapRegion* claim_region(uint worker_id);
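  //
  // Illustrative caller pattern (a sketch only; the real driver loop lives in
  // the marking task, and regular_clock_call() is an assumed task-side method
  // not shown in this excerpt):
  //
  //   while (!out_of_regions()) {
  //     HeapRegion* r = claim_region(worker_id);
  //     if (r != NULL) {
  //       // ... scan and mark objects in r ...
  //     }
  //     regular_clock_call();  // stays responsive even when r is NULL
  //   }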

  // Determines whether we've run out of regions to scan. Note that
  // the finger can point past the heap end in case the heap was expanded
  // to satisfy an allocation without doing a GC. This is fine, because all
  // objects in those regions will be considered live anyway because of
  // SATB guarantees (i.e. their TAMS will be equal to bottom).
  bool out_of_regions() { return _finger >= _heap.end(); }

  // Returns the task with the given id.
  G1CMTask* task(uint id) {
    // During concurrent start we use the parallel gc threads to do some work, so
    // we can only compare against _max_num_tasks.
    assert(id < _max_num_tasks, "Task id %u not within bounds up to %u", id, _max_num_tasks);
    return _tasks[id];
  }

  // Access / manipulation of the overflow flag, which is set to
  // indicate that the global stack has overflowed.
  bool has_overflown() { return _has_overflown; }
  void set_has_overflown() { _has_overflown = true; }
  void clear_has_overflown() { _has_overflown = false; }
  bool restart_for_overflow() { return _restart_for_overflow; }

  // Methods to enter the two overflow sync barriers.
  void enter_first_sync_barrier(uint worker_id);
  void enter_second_sync_barrier(uint worker_id);
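  //
  // Illustrative overflow-recovery sketch (an assumption about the control
  // flow, not the actual implementation):
  //
  //   if (has_overflown()) {
  //     enter_first_sync_barrier(worker_id);   // rendezvous; one task resets
  //                                            // the global marking state
  //     enter_second_sync_barrier(worker_id);  // all tasks resume together
  //   }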

  // Clear the given bitmap in parallel using the given WorkGang. If may_yield is
  // true, periodically check whether this method should exit prematurely.
  void clear_bitmap(G1CMBitMap* bitmap, WorkGang* workers, bool may_yield);
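  //
  // Sketch of the yielding pattern (illustrative; the chunking scheme and the
  // SuspendibleThreadSet calls are assumptions, not the actual code):
  //
  //   for (each chunk of the bitmap) {
  //     clear_chunk();
  //     if (may_yield && SuspendibleThreadSet::should_yield()) {
  //       SuspendibleThreadSet::yield();
  //     }
  //   }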

  // ...

  G1ConcurrentMarkThread* cm_thread() { return _cm_thread; }

  const G1CMBitMap* const prev_mark_bitmap() const { return _prev_mark_bitmap; }
  G1CMBitMap* next_mark_bitmap() const { return _next_mark_bitmap; }

  // Calculates the number of concurrent GC threads to be used in the marking phase.
  uint calc_active_marking_workers();

  // Moves all per-task cached data into global state.
  void flush_all_task_caches();
  // Prepare internal data structures for the next mark cycle. This includes clearing
  // the next mark bitmap and some internal data structures. This method is intended
  // to be called concurrently with the mutator. It will yield to safepoint requests.
  void cleanup_for_next_mark();

  // Clear the previous marking bitmap during a safepoint.
  void clear_prev_bitmap(WorkGang* workers);

  // These two methods do the work that needs to be done at the start and end of the
  // concurrent start pause.
  void pre_concurrent_start();
  void post_concurrent_start();

  // Scan all the root regions and mark everything reachable from
  // them.
  void scan_root_regions();

  // Scan a single root MemRegion to mark everything reachable from it.
  void scan_root_region(const MemRegion* region, uint worker_id);

  // Do concurrent phase of marking, to a tentative transitive closure.
  void mark_from_roots();

  // Do concurrent preclean work.
  void preclean();

  void remark();

  void cleanup();
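
  // For orientation, a rough sketch of how one marking cycle strings the
  // calls above together (an approximation, not authoritative control flow):
  //
  //   pre_concurrent_start();    // at the concurrent start safepoint
  //   post_concurrent_start();
  //   scan_root_regions();       // concurrent; maintains the SATB invariant
  //   mark_from_roots();         // concurrent marking to a tentative closure
  //   preclean();                // optional concurrent preclean
  //   remark();                  // safepoint: finalize marking
  //   cleanup();                 // safepoint: reclaim empty regions etc.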
  // Mark in the previous bitmap. Caution: the prev bitmap is usually read-only, so use
  // this carefully.
  inline void mark_in_prev_bitmap(oop p);