40 #include "gc/g1/heapRegionSet.inline.hpp"
41 #include "gc/g1/suspendibleThreadSet.hpp"
42 #include "gc/shared/gcId.hpp"
43 #include "gc/shared/gcTimer.hpp"
44 #include "gc/shared/gcTrace.hpp"
45 #include "gc/shared/gcTraceTime.inline.hpp"
46 #include "gc/shared/genOopClosures.inline.hpp"
47 #include "gc/shared/referencePolicy.hpp"
48 #include "gc/shared/strongRootsScope.hpp"
49 #include "gc/shared/taskqueue.inline.hpp"
50 #include "gc/shared/vmGCOperations.hpp"
51 #include "logging/log.hpp"
52 #include "memory/allocation.hpp"
53 #include "memory/resourceArea.hpp"
54 #include "oops/oop.inline.hpp"
55 #include "runtime/atomic.inline.hpp"
56 #include "runtime/handles.inline.hpp"
57 #include "runtime/java.hpp"
58 #include "runtime/prefetch.inline.hpp"
59 #include "services/memTracker.hpp"
60
61 // Concurrent marking bit map wrapper
62
63 G1CMBitMapRO::G1CMBitMapRO(int shifter) :
64 _bm(),
65 _shifter(shifter) {
66 _bmStartWord = 0;
67 _bmWordSize = 0;
68 }
69
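// Find the first set (marked) bit at or above addr, searching no further than
// limit. addr is rounded up to the bitmap granularity determined by _shifter
// before the search.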
HeapWord* G1CMBitMapRO::getNextMarkedWordAddress(const HeapWord* addr,
                                                 const HeapWord* limit) const {
  // First we must round addr *up* to a possible object boundary.
  addr = (HeapWord*)align_size_up((intptr_t)addr,
                                  HeapWordSize << _shifter);
  size_t addrOffset = heapWordToOffset(addr);
  assert(limit != NULL, "limit must not be NULL");
  size_t limitOffset = heapWordToOffset(limit);
  size_t nextOffset = _bm.get_next_one_offset(addrOffset, limitOffset);
  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  // ... (remainder of this function and the intervening code elided) ...

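// note_start_of_gc()/note_end_of_gc() bracket an evacuation pause: the mark
// stack index is recorded when the pause starts and verified to be unchanged
// when it ends, catching accidental pushes onto the mark stack during a GC.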
void G1CMMarkStack::note_start_of_gc() {
  assert(_saved_index == -1,
         "note_start_of_gc()/end_of_gc() bracketed incorrectly");
  _saved_index = _index;
}

void G1CMMarkStack::note_end_of_gc() {
  // This is intentionally a guarantee, instead of an assert. If we
  // accidentally add something to the mark stack during GC, it
  // will be a correctness issue so it's better if we crash. We'll
  // only check this once per GC anyway, so it won't be a performance
  // issue in any way.
  guarantee(_saved_index == _index,
            "saved index: %d index: %d", _saved_index, _index);
  _saved_index = -1;
}

G1CMRootRegions::G1CMRootRegions() :
  _young_list(NULL), _cm(NULL), _scan_in_progress(false),
  _should_abort(false), _next_survivor(NULL) { }

void G1CMRootRegions::init(G1CollectedHeap* g1h, G1ConcurrentMark* cm) {
  _young_list = g1h->young_list();
  _cm = cm;
}

void G1CMRootRegions::prepare_for_scan() {
  assert(!scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
  assert(_next_survivor == NULL, "pre-condition");
  _next_survivor = _young_list->first_survivor_region();
  _scan_in_progress = (_next_survivor != NULL);
  _should_abort = false;
}

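// Claim the next survivor region to be scanned as a root region. Several
// worker threads may call this concurrently: the quick unlocked read of
// _next_survivor is re-validated under RootRegionScan_lock before the claim
// is recorded.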
HeapRegion* G1CMRootRegions::claim_next() {
  if (_should_abort) {
    // If someone has set the should_abort flag, we return NULL to
    // force the caller to bail out of their loop.
    return NULL;
  }

  // Currently, only survivors can be root regions.
  HeapRegion* res = _next_survivor;
  if (res != NULL) {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    // Read it again in case it changed while we were waiting for the lock.
    res = _next_survivor;
    if (res != NULL) {
      if (res == _young_list->last_survivor_region()) {
        // We just claimed the last survivor so store NULL to indicate
        // that we're done.
        _next_survivor = NULL;
      } else {
        _next_survivor = res->get_next_young_region();
      }
    } else {
      // Someone else claimed the last survivor while we were trying
      // to take the lock so nothing else to do.
    }
  }
  assert(res == NULL || res->is_survivor(), "post-condition");

  return res;
}

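// notify_scan_done() is used both when the scan completes normally and when
// it is cancelled: it clears the in-progress flag under RootRegionScan_lock
// and wakes up any threads blocked in wait_until_scan_finished().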
void G1CMRootRegions::notify_scan_done() {
  MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
  _scan_in_progress = false;
  RootRegionScan_lock->notify_all();
}

void G1CMRootRegions::cancel_scan() {
  notify_scan_done();
}

void G1CMRootRegions::scan_finished() {
  assert(scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
  if (!_should_abort) {
    assert(_next_survivor == NULL, "we should have claimed all survivors");
  }
  _next_survivor = NULL;

  notify_scan_done();
}

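// Block until root region scanning has finished. Returns false if no scan was
// in progress when called; otherwise waits for it to complete and returns true.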
bool G1CMRootRegions::wait_until_scan_finished() {
  if (!scan_in_progress()) return false;

  {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    while (scan_in_progress()) {
      RootRegionScan_lock->wait(Mutex::_no_safepoint_check_flag);
    }
  }
  return true;
}

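// Derive the number of concurrent marking threads from the number of parallel
// GC worker threads: roughly one quarter of them, but always at least one.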
uint G1ConcurrentMark::scale_parallel_threads(uint n_par_threads) {
  return MAX2((n_par_threads + 2) / 4, 1U);
}

// ===========================================================================
// Second listing of the same section, in which G1CMRootRegions claims
// survivor root regions via an atomic index into the survivor region array
// instead of the lock-protected _next_survivor pointer.
// ===========================================================================

#include "gc/g1/heapRegionSet.inline.hpp"
#include "gc/g1/suspendibleThreadSet.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "gc/shared/vmGCOperations.hpp"
#include "logging/log.hpp"
#include "memory/allocation.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/prefetch.inline.hpp"
#include "services/memTracker.hpp"
#include "utilities/growableArray.hpp"

// Concurrent marking bit map wrapper

G1CMBitMapRO::G1CMBitMapRO(int shifter) :
  _bm(),
  _shifter(shifter) {
  _bmStartWord = 0;
  _bmWordSize = 0;
}

HeapWord* G1CMBitMapRO::getNextMarkedWordAddress(const HeapWord* addr,
                                                 const HeapWord* limit) const {
  // First we must round addr *up* to a possible object boundary.
  addr = (HeapWord*)align_size_up((intptr_t)addr,
                                  HeapWordSize << _shifter);
  size_t addrOffset = heapWordToOffset(addr);
  assert(limit != NULL, "limit must not be NULL");
  size_t limitOffset = heapWordToOffset(limit);
  size_t nextOffset = _bm.get_next_one_offset(addrOffset, limitOffset);
  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  // ... (remainder of this function and the intervening code elided) ...

void G1CMMarkStack::note_start_of_gc() {
  assert(_saved_index == -1,
         "note_start_of_gc()/end_of_gc() bracketed incorrectly");
  _saved_index = _index;
}

void G1CMMarkStack::note_end_of_gc() {
  // This is intentionally a guarantee, instead of an assert. If we
  // accidentally add something to the mark stack during GC, it
  // will be a correctness issue so it's better if we crash. We'll
  // only check this once per GC anyway, so it won't be a performance
  // issue in any way.
  guarantee(_saved_index == _index,
            "saved index: %d index: %d", _saved_index, _index);
  _saved_index = -1;
}

G1CMRootRegions::G1CMRootRegions() :
  _young_list(NULL), _cm(NULL), _scan_in_progress(false),
  _should_abort(false), _claimed_survivor_index(0) { }

void G1CMRootRegions::init(G1CollectedHeap* g1h, G1ConcurrentMark* cm) {
  _young_list = g1h->young_list();
  _cm = cm;
}

void G1CMRootRegions::prepare_for_scan() {
  assert(!scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
  _claimed_survivor_index = 0;
  _scan_in_progress = true;
  _should_abort = false;
}

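// Claim the next survivor region to be scanned as a root region, without
// taking a lock: Atomic::add returns the incremented value, so every calling
// thread obtains a distinct 1-based claim, which is then converted into a
// 0-based index into the survivor region array. An index past the end of the
// array means all survivors have already been claimed.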
HeapRegion* G1CMRootRegions::claim_next() {
  if (_should_abort) {
    // If someone has set the should_abort flag, we return NULL to
    // force the caller to bail out of their loop.
    return NULL;
  }

  // Currently, only survivors can be root regions.
  const GrowableArray<HeapRegion*>* survivor_regions = _young_list->survivor_regions();

  // The claimed survivor index is a 1-based index into the survivor regions
  // array; this allows us to initialize the index to 0 and avoid signed
  // overflow issues.
  int claimed_index = Atomic::add(1, &_claimed_survivor_index);
  assert(claimed_index > 0, "%d must always be positive", claimed_index);
  claimed_index--;
  if (claimed_index < survivor_regions->length()) {
    return survivor_regions->at(claimed_index);
  }
  return NULL;
}


void G1CMRootRegions::notify_scan_done() {
  MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
  _scan_in_progress = false;
  RootRegionScan_lock->notify_all();
}

void G1CMRootRegions::cancel_scan() {
  notify_scan_done();
}

void G1CMRootRegions::scan_finished() {
  assert(scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
  if (!_should_abort) {
    assert(_claimed_survivor_index >= _young_list->survivor_regions()->length(),
           "we should have claimed all survivors, claimed index = %d, length = %d",
           _claimed_survivor_index, _young_list->survivor_regions()->length());
  }

  notify_scan_done();
}

bool G1CMRootRegions::wait_until_scan_finished() {
  if (!scan_in_progress()) return false;

  {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    while (scan_in_progress()) {
      RootRegionScan_lock->wait(Mutex::_no_safepoint_check_flag);
    }
  }
  return true;
}

uint G1ConcurrentMark::scale_parallel_threads(uint n_par_threads) {
  return MAX2((n_par_threads + 2) / 4, 1U);
}
