  TaskQueueEntryChunk* result = remove_chunk_from_list(&_chunk_list);
  if (result != NULL) {
    _chunks_in_chunk_list--;
  }
  return result;
}

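// Remove a chunk from the free list under the free-list lock; returns NULL
// when the free list is empty.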
G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_free_list() {
  MutexLocker x(MarkStackFreeList_lock, Mutex::_no_safepoint_check_flag);
  return remove_chunk_from_list(&_free_list);
}

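// Allocate a new chunk directly from the backing array by bumping the
// high-water mark _hwm; returns NULL once the backing memory is exhausted.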
G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::allocate_new_chunk() {
  // This dirty read of _hwm is okay because we only ever increase the _hwm in parallel code.
  // Further, this limits _hwm to a value of _chunk_capacity + #threads, avoiding
  // wraparound of _hwm.
  if (_hwm >= _chunk_capacity) {
    return NULL;
  }

  size_t cur_idx = Atomic::add(1u, &_hwm) - 1;
  if (cur_idx >= _chunk_capacity) {
    return NULL;
  }

  TaskQueueEntryChunk* result = ::new (&_base[cur_idx]) TaskQueueEntryChunk;
  result->next = NULL;
  return result;
}

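// Push a chunk's worth of entries onto the global mark stack. A chunk is taken
// from the free list or, failing that, allocated from the backing memory;
// returns false if no chunk could be obtained.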
bool G1CMMarkStack::par_push_chunk(G1TaskQueueEntry* ptr_arr) {
  // Get a new chunk.
  TaskQueueEntryChunk* new_chunk = remove_chunk_from_free_list();

  if (new_chunk == NULL) {
    // Did not get a chunk from the free list. Allocate from backing memory.
    new_chunk = allocate_new_chunk();

    if (new_chunk == NULL) {
      return false;
    }

// ...

  _num_root_regions(0),
  _claimed_root_regions(0),
  _scan_in_progress(false),
  _should_abort(false) {
  _root_regions = new MemRegion[_max_regions];
  if (_root_regions == NULL) {
    vm_exit_during_initialization("Could not allocate root MemRegion set.");
  }
}

G1CMRootMemRegions::~G1CMRootMemRegions() {
  delete[] _root_regions;
}

void G1CMRootMemRegions::reset() {
  _num_root_regions = 0;
}

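// Record a new root MemRegion [start, end). Must be called at a safepoint;
// the slot is claimed with an atomic increment so multiple threads may add
// regions concurrently.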
void G1CMRootMemRegions::add(HeapWord* start, HeapWord* end) {
  assert_at_safepoint();
  size_t idx = Atomic::add((size_t)1, &_num_root_regions) - 1;
  assert(idx < _max_regions, "Trying to add more root MemRegions than there is space " SIZE_FORMAT, _max_regions);
  assert(start != NULL && end != NULL && start <= end, "Start (" PTR_FORMAT ") should be less than or equal to "
         "end (" PTR_FORMAT ")", p2i(start), p2i(end));
  _root_regions[idx].set_start(start);
  _root_regions[idx].set_end(end);
}

void G1CMRootMemRegions::prepare_for_scan() {
  assert(!scan_in_progress(), "pre-condition");

  _scan_in_progress = _num_root_regions > 0;

  _claimed_root_regions = 0;
  _should_abort = false;
}

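// Claim the next root MemRegion to scan, or return NULL if all regions have
// been claimed or the scan should be aborted.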
const MemRegion* G1CMRootMemRegions::claim_next() {
  if (_should_abort) {
    // If someone has set the should_abort flag, we return NULL to
    // force the caller to bail out of their loop.
    return NULL;
  }

  if (_claimed_root_regions >= _num_root_regions) {
    return NULL;
  }

  size_t claimed_index = Atomic::add((size_t)1, &_claimed_root_regions) - 1;
  if (claimed_index < _num_root_regions) {
    return &_root_regions[claimed_index];
  }
  return NULL;
}

uint G1CMRootMemRegions::num_root_regions() const {
  return (uint)_num_root_regions;
}

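// Mark the root region scan as finished and wake up any threads waiting on
// RootRegionScan_lock.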
void G1CMRootMemRegions::notify_scan_done() {
  MutexLocker x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
  _scan_in_progress = false;
  RootRegionScan_lock->notify_all();
}

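// Cancelling a scan is handled like completing it: flag the scan as done and
// notify any waiters.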
void G1CMRootMemRegions::cancel_scan() {
  notify_scan_done();
}

// ...

    _g1h(g1h), _cm(cm), _cl(cl), _num_regions_selected_for_rebuild(0) { }

    virtual bool do_heap_region(HeapRegion* r) {
      update_remset_before_rebuild(r);
      update_marked_bytes(r);

      return false;
    }

    uint num_selected_for_rebuild() const { return _num_regions_selected_for_rebuild; }
  };

public:
  G1UpdateRemSetTrackingBeforeRebuildTask(G1CollectedHeap* g1h, G1ConcurrentMark* cm, uint num_workers) :
    AbstractGangTask("G1 Update RemSet Tracking Before Rebuild"),
    _g1h(g1h), _cm(cm), _hrclaimer(num_workers), _total_selected_for_rebuild(0), _cl("Post-Marking") { }

  virtual void work(uint worker_id) {
    G1UpdateRemSetTrackingBeforeRebuild update_cl(_g1h, _cm, &_cl);
    _g1h->heap_region_par_iterate_from_worker_offset(&update_cl, &_hrclaimer, worker_id);
    Atomic::add(update_cl.num_selected_for_rebuild(), &_total_selected_for_rebuild);
  }

  uint total_selected_for_rebuild() const { return _total_selected_for_rebuild; }

  // Number of regions for which roughly one thread should be spawned for this work.
  static const uint RegionsPerThread = 384;
};

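// Closure applied to every heap region after the remembered set rebuild to
// update the region's remembered set tracking state.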
class G1UpdateRemSetTrackingAfterRebuild : public HeapRegionClosure {
  G1CollectedHeap* _g1h;
public:
  G1UpdateRemSetTrackingAfterRebuild(G1CollectedHeap* g1h) : _g1h(g1h) { }

  virtual bool do_heap_region(HeapRegion* r) {
    _g1h->policy()->remset_tracker()->update_after_rebuild(r);
    return false;
  }
};

void G1ConcurrentMark::remark() {

// ...

void G1ConcurrentMark::clear_range_in_prev_bitmap(MemRegion mr) {
  _prev_mark_bitmap->clear_range(mr);
}

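// Claim the next region below the global finger for marking by atomically
// moving the finger to the end of that region; returns NULL if the claim
// failed and the caller should retry.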
HeapRegion*
G1ConcurrentMark::claim_region(uint worker_id) {
  // "checkpoint" the finger
  HeapWord* finger = _finger;

  while (finger < _heap.end()) {
    assert(_g1h->is_in_g1_reserved(finger), "invariant");

    HeapRegion* curr_region = _g1h->heap_region_containing(finger);
    // Make sure that the reads below do not float before loading curr_region.
    OrderAccess::loadload();
    // The heap_region_containing() call above may return NULL because we always
    // scan and claim until the end of the heap. In this case, just jump to the
    // next region.
    HeapWord* end = curr_region != NULL ? curr_region->end() : finger + HeapRegion::GrainWords;

    // Is the gap between reading the finger and doing the CAS too long?
    HeapWord* res = Atomic::cmpxchg(end, &_finger, finger);
    if (res == finger && curr_region != NULL) {
      // we succeeded
      HeapWord* bottom = curr_region->bottom();
      HeapWord* limit = curr_region->next_top_at_mark_start();

      // notice that _finger == end cannot be guaranteed here since
      // someone else might have moved the finger even further
      assert(_finger >= end, "the finger should have moved forward");

      if (limit > bottom) {
        return curr_region;
      } else {
        assert(limit == bottom,
               "the region limit should be at bottom");
        // we return NULL and the caller should try calling
        // claim_region() again.
        return NULL;
      }
    } else {
      assert(_finger > finger, "the finger should have moved forward");