207 // Return whether the chunk list is empty. Racy due to unsynchronized access to
208 // _chunk_list.
209 bool is_empty() const { return _chunk_list == NULL; }
210
211 size_t capacity() const { return _chunk_capacity; } // Current capacity; grown by expand().
212
213 // Expand the stack, typically in response to an overflow condition
214 void expand();
215
216 // Return the approximate number of oops on this mark stack. Racy due to
217 // unsynchronized access to _chunks_in_chunk_list.
218 size_t size() const { return _chunks_in_chunk_list * EntriesPerChunk; }
219
220 void set_empty(); // Discard all entries, making the stack empty.
221
222 // Apply Fn to every oop on the mark stack. The mark stack must not
223 // be modified while iterating.
224 template<typename Fn> void iterate(Fn fn) const PRODUCT_RETURN;
225 };
226
227 // Root Regions are regions that are not empty at the beginning of a
228 // marking cycle and which we might collect during an evacuation pause
229 // while the cycle is active. Given that, during evacuation pauses, we
230 // do not copy objects that are explicitly marked, what we have to do
231 // for the root regions is to scan them and mark all objects reachable
232 // from them. According to the SATB assumptions, we only need to visit
233 // each object once during marking. So, as long as we finish this scan
234 // before the next evacuation pause, we can copy the objects from the
235 // root regions without having to mark them or do anything else to them.
236 //
237 // Currently, we only support root region scanning once (at the start
238 // of the marking cycle) and the root regions are all the survivor
239 // regions populated during the initial-mark pause.
240 class G1CMRootRegions {
241 private:
242 const G1SurvivorRegions* _survivors;
243 G1ConcurrentMark* _cm;
244
245 volatile bool _scan_in_progress; // True while the root regions are being scanned.
246 volatile bool _should_abort; // Set by abort() to stop claiming early.
247 volatile int _claimed_survivor_index; // Next survivor index to claim; advanced atomically by claim_next().
248
249 void notify_scan_done(); // Signal that root region scanning has completed.
250
251 public:
252 G1CMRootRegions();
253 // We actually do most of the initialization in this method.
254 void init(const G1SurvivorRegions* survivors, G1ConcurrentMark* cm);
255
256 // Reset the claiming / scanning of the root regions.
257 void prepare_for_scan();
258
259 // Forces claim_next() to return NULL so that the iteration aborts early.
260 void abort() { _should_abort = true; }
261
262 // Return true if the CM thread is actively scanning root regions,
263 // false otherwise.
264 bool scan_in_progress() { return _scan_in_progress; }
265
266 // Claim the next root region to scan atomically, or return NULL if
267 // all have been claimed.
268 HeapRegion* claim_next();
269
270 // The number of root regions to scan.
271 uint num_root_regions() const;
272
273 void cancel_scan(); // Stop the scan (presumably aborts and marks it done — confirm in the .cpp).
274
536
537 // Moves all per-task cached data into global state.
538 void flush_all_task_caches();
539 // Prepare internal data structures for the next mark cycle. This includes clearing
540 // the next mark bitmap and some internal data structures. This method is intended
541 // to be called concurrently to the mutator. It will yield to safepoint requests.
542 void cleanup_for_next_mark();
543
544 // Clear the previous marking bitmap during safepoint.
545 void clear_prev_bitmap(WorkGang* workers);
546
547 // These two methods do the work that needs to be done at the start and end of the
548 // initial mark pause.
549 void pre_initial_mark();
550 void post_initial_mark();
551
552 // Scan all the root regions and mark everything reachable from
553 // them.
554 void scan_root_regions();
555
556 // Scan a single root region and mark everything reachable from it.
557 void scan_root_region(HeapRegion* hr, uint worker_id);
558
559 // Do concurrent phase of marking, to a tentative transitive closure.
560 void mark_from_roots();
561
562 // Do concurrent preclean work.
563 void preclean();
564
565 void remark(); // Pause work that finalizes marking after the concurrent phase.
566
567 void cleanup(); // Pause work performed after remark.
568 // Mark in the previous bitmap. Caution: the prev bitmap is usually read-only, so use
569 // this carefully.
570 inline void mark_in_prev_bitmap(oop p);
571
572 // Clears marks for all objects in the given range in the previous
573 // bitmap. Caution: the previous bitmap is usually
574 // read-only, so use this carefully!
575 void clear_range_in_prev_bitmap(MemRegion mr);
576
|
207 // Return whether the chunk list is empty. Racy due to unsynchronized access to
208 // _chunk_list.
209 bool is_empty() const { return _chunk_list == NULL; }
210
211 size_t capacity() const { return _chunk_capacity; } // Current capacity; grown by expand().
212
213 // Expand the stack, typically in response to an overflow condition
214 void expand();
215
216 // Return the approximate number of oops on this mark stack. Racy due to
217 // unsynchronized access to _chunks_in_chunk_list.
218 size_t size() const { return _chunks_in_chunk_list * EntriesPerChunk; }
219
220 void set_empty(); // Discard all entries, making the stack empty.
221
222 // Apply Fn to every oop on the mark stack. The mark stack must not
223 // be modified while iterating.
224 template<typename Fn> void iterate(Fn fn) const PRODUCT_RETURN;
225 };
226
227 // Root Regions are regions that contain objects from nTAMS to top. These are roots
228 // for marking, i.e. their referenced objects must be kept alive to maintain the
229 // SATB invariant.
230 // We could scan and mark them through during the initial-mark pause, but for
231 // pause time reasons we move this work to the concurrent phase.
232 // We need to complete this procedure before the next GC because it might determine
233 // that some of these "root objects" are dead, potentially dropping some required
234 // references.
235 // Root regions consist of the complete contents of survivor regions, and any
236 // objects copied into old gen during GC.
237 class G1CMRootRegions {
238 HeapRegion** _root_regions; // Array of registered root regions.
239 uint _max_regions; // Capacity of _root_regions.
240 volatile size_t _cur_regions; // Number of regions added so far.
241
242 volatile bool _scan_in_progress; // True while the root regions are being scanned.
243 volatile bool _should_abort; // Set by abort() to stop claiming early.
244
245 volatile size_t _claimed_root_regions; // Number of regions claimed; advanced atomically by claim_next().
246
247 void notify_scan_done(); // Signal that root region scanning has completed.
248
249 public:
250 G1CMRootRegions();
251 // We actually do most of the initialization in this method; allows up to
252 // max_regions root regions to be added afterwards.
252 void reset(uint const max_regions);
253
254 void add(HeapRegion* hr); // Register hr as a root region to scan.
255
256 // Reset the claiming / scanning of the root regions.
257 void prepare_for_scan();
258
259 // Forces claim_next() to return NULL so that the iteration aborts early.
260 void abort() { _should_abort = true; }
261
262 // Return true if the CM thread is actively scanning root regions,
263 // false otherwise.
264 bool scan_in_progress() { return _scan_in_progress; }
265
266 // Claim the next root region to scan atomically, or return NULL if
267 // all have been claimed.
268 HeapRegion* claim_next();
269
270 // The number of root regions to scan.
271 uint num_root_regions() const;
272
273 void cancel_scan(); // Stop the scan (presumably aborts and marks it done — confirm in the .cpp).
274
536
537 // Moves all per-task cached data into global state.
538 void flush_all_task_caches();
539 // Prepare internal data structures for the next mark cycle. This includes clearing
540 // the next mark bitmap and some internal data structures. This method is intended
541 // to be called concurrently to the mutator. It will yield to safepoint requests.
542 void cleanup_for_next_mark();
543
544 // Clear the previous marking bitmap during safepoint.
545 void clear_prev_bitmap(WorkGang* workers);
546
547 // These two methods do the work that needs to be done at the start and end of the
548 // initial mark pause.
549 void pre_initial_mark();
550 void post_initial_mark();
551
552 // Scan all the root regions and mark everything reachable from
553 // them.
554 void scan_root_regions();
555
556 // Scan a single root region from nTAMS to top and mark everything reachable from it.
557 void scan_root_region(HeapRegion* hr, uint worker_id);
558
559 // Do concurrent phase of marking, to a tentative transitive closure.
560 void mark_from_roots();
561
562 // Do concurrent preclean work.
563 void preclean();
564
565 void remark(); // Pause work that finalizes marking after the concurrent phase.
566
567 void cleanup(); // Pause work performed after remark.
568 // Mark in the previous bitmap. Caution: the prev bitmap is usually read-only, so use
569 // this carefully.
570 inline void mark_in_prev_bitmap(oop p);
571
572 // Clears marks for all objects in the given range in the previous
573 // bitmap. Caution: the previous bitmap is usually
574 // read-only, so use this carefully!
575 void clear_range_in_prev_bitmap(MemRegion mr);
576
|