789 if (cl.complete()) {
790 clear_all_count_data();
791 }
792
793 // Repeat the asserts from above.
794 guarantee(cmThread()->during_cycle(), "invariant");
795 guarantee(!g1h->collector_state()->mark_in_progress(), "invariant");
796 }
797
798 class CheckBitmapClearHRClosure : public HeapRegionClosure {
799 CMBitMap* _bitmap;
800 bool _error;
801 public:
802 CheckBitmapClearHRClosure(CMBitMap* bitmap) : _bitmap(bitmap) {
803 }
804
805 virtual bool doHeapRegion(HeapRegion* r) {
806 // This closure can be called concurrently to the mutator, so we must make sure
807 // that the result of the getNextMarkedWordAddress() call is compared to the
808 // value passed to it as limit to detect any found bits.
809 HeapWord* end = r->end();
810 return _bitmap->getNextMarkedWordAddress(r->bottom(), end) != end;
811 }
812 };
813
814 bool ConcurrentMark::nextMarkBitmapIsClear() {
815 CheckBitmapClearHRClosure cl(_nextMarkBitMap);
816 _g1h->heap_region_iterate(&cl);
817 return cl.complete();
818 }
819
820 class NoteStartOfMarkHRClosure: public HeapRegionClosure {
821 public:
822 bool doHeapRegion(HeapRegion* r) {
823 r->note_start_of_marking();
824 return false;
825 }
826 };
827
828 void ConcurrentMark::checkpointRootsInitialPre() {
1326
1327 BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
1328 BitMap::idx_t end_idx = _cm->card_bitmap_index_for(obj_end);
1329
1330 // Note: if we're looking at the last region in heap - obj_end
1331 // could be actually just beyond the end of the heap; end_idx
1332 // will then correspond to a (non-existent) card that is also
1333 // just beyond the heap.
1334 if (_g1h->is_in_g1_reserved(obj_end) && !_ct_bs->is_card_aligned(obj_end)) {
1335 // end of object is not card aligned - increment to cover
1336 // all the cards spanned by the object
1337 end_idx += 1;
1338 }
1339
1340 // Set the bits in the card BM for the cards spanned by this object.
1341 _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);
1342
1343 // Add the size of this object to the number of marked bytes.
1344 marked_bytes += (size_t)obj_sz * HeapWordSize;
1345
1346 if (obj_end > hr->end()) {
1347 break;
1348 }
1349 // Find the next marked object after this one.
1350 start = _bm->getNextMarkedWordAddress(obj_end, ntams);
1351 }
1352
1353 // Mark the allocated-since-marking portion...
1354 HeapWord* top = hr->top();
1355 if (ntams < top) {
1356 BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams);
1357 BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top);
1358
1359 // Note: if we're looking at the last region in heap - top
1360 // could be actually just beyond the end of the heap; end_idx
1361 // will then correspond to a (non-existent) card that is also
1362 // just beyond the heap.
1363 if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) {
1364 // end of object is not card aligned - increment to cover
1365 // all the cards spanned by the object
1415 _exp_region_bm(exp_region_bm), _exp_card_bm(exp_card_bm),
1416 _failures(0) { }
1417
1418 int failures() const { return _failures; }
1419
1420 bool doHeapRegion(HeapRegion* hr) {
1421 int failures = 0;
1422
1423 // Call the CalcLiveObjectsClosure to walk the marking bitmap for
1424 // this region and set the corresponding bits in the expected region
1425 // and card bitmaps.
1426 bool res = _calc_cl.doHeapRegion(hr);
1427 assert(res == false, "should be continuing");
1428
1429 // Verify the marked bytes for this region.
1430 size_t exp_marked_bytes = _calc_cl.region_marked_bytes();
1431 size_t act_marked_bytes = hr->next_marked_bytes();
1432
1433 // We're not OK if expected marked bytes > actual marked bytes. It means
1434 // we have missed accounting some objects during the actual marking.
1435 if (exp_marked_bytes > act_marked_bytes) {
1436 failures += 1;
1437 }
1438
1439 // Verify the bit, for this region, in the actual and expected
1440 // (which was just calculated) region bit maps.
1441 // We're not OK if the bit in the calculated expected region
1442 // bitmap is set and the bit in the actual region bitmap is not.
1443 BitMap::idx_t index = (BitMap::idx_t) hr->hrm_index();
1444
1445 bool expected = _exp_region_bm->at(index);
1446 bool actual = _region_bm->at(index);
1447 if (expected && !actual) {
1448 failures += 1;
1449 }
1450
1451 // Verify that the card bit maps for the cards spanned by the current
1452 // region match. We have an error if we have a set bit in the expected
1453 // bit map and the corresponding bit in the actual bitmap is not set.
1454
1455 BitMap::idx_t start_idx = _cm->card_bitmap_index_for(hr->bottom());
2423 void ConcurrentMark::clearRangePrevBitmap(MemRegion mr) {
2424 // Note we are overriding the read-only view of the prev map here, via
2425 // the cast.
2426 ((CMBitMap*)_prevMarkBitMap)->clearRange(mr);
2427 }
2428
// Clears the given range in the next mark bitmap. The next map is
// writable, so no cast is needed (contrast clearRangePrevBitmap above).
void ConcurrentMark::clearRangeNextBitmap(MemRegion mr) {
  _nextMarkBitMap->clearRange(mr);
}
2432
2433 HeapRegion*
2434 ConcurrentMark::claim_region(uint worker_id) {
2435 // "checkpoint" the finger
2436 HeapWord* finger = _finger;
2437
2438 // _heap_end will not change underneath our feet; it only changes at
2439 // yield points.
2440 while (finger < _heap_end) {
2441 assert(_g1h->is_in_g1_reserved(finger), "invariant");
2442
2443 // Note on how this code handles humongous regions. In the
2444 // normal case the finger will reach the start of a "starts
2445 // humongous" (SH) region. Its end will either be the end of the
2446 // last "continues humongous" (CH) region in the sequence, or the
2447 // standard end of the SH region (if the SH is the only region in
2448 // the sequence). That way claim_region() will skip over the CH
2449 // regions. However, there is a subtle race between a CM thread
2450 // executing this method and a mutator thread doing a humongous
2451 // object allocation. The two are not mutually exclusive as the CM
2452 // thread does not need to hold the Heap_lock when it gets
2453 // here. So there is a chance that claim_region() will come across
2454 // a free region that's in the progress of becoming a SH or a CH
2455 // region. In the former case, it will either
2456 // a) Miss the update to the region's end, in which case it will
2457 // visit every subsequent CH region, will find their bitmaps
2458 // empty, and do nothing, or
2459 // b) Will observe the update of the region's end (in which case
2460 // it will skip the subsequent CH regions).
2461 // If it comes across a region that suddenly becomes CH, the
2462 // scenario will be similar to b). So, the race between
2463 // claim_region() and a humongous object allocation might force us
2464 // to do a bit of unnecessary work (due to some unnecessary bitmap
2465 // iterations) but it should not introduce and correctness issues.
2466 HeapRegion* curr_region = _g1h->heap_region_containing(finger);
2467
2468 // Above heap_region_containing may return NULL as we always scan claim
2469 // until the end of the heap. In this case, just jump to the next region.
2470 HeapWord* end = curr_region != NULL ? curr_region->end() : finger + HeapRegion::GrainWords;
2471
2472 // Is the gap between reading the finger and doing the CAS too long?
2473 HeapWord* res = (HeapWord*) Atomic::cmpxchg_ptr(end, &_finger, finger);
2474 if (res == finger && curr_region != NULL) {
2475 // we succeeded
2476 HeapWord* bottom = curr_region->bottom();
2477 HeapWord* limit = curr_region->next_top_at_mark_start();
2478
2479 // notice that _finger == end cannot be guaranteed here since,
2480 // someone else might have moved the finger even further
2481 assert(_finger >= end, "the finger should have moved forward");
2482
2483 if (limit > bottom) {
2484 return curr_region;
2485 } else {
2524 };
2525
2526 void ConcurrentMark::verify_no_cset_oops() {
2527 assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
2528 if (!G1CollectedHeap::heap()->collector_state()->mark_in_progress()) {
2529 return;
2530 }
2531
2532 // Verify entries on the global mark stack
2533 _markStack.iterate(VerifyNoCSetOops("Stack"));
2534
2535 // Verify entries on the task queues
2536 for (uint i = 0; i < _max_worker_id; ++i) {
2537 CMTaskQueue* queue = _task_queues->queue(i);
2538 queue->iterate(VerifyNoCSetOops("Queue", i));
2539 }
2540
2541 // Verify the global finger
2542 HeapWord* global_finger = finger();
2543 if (global_finger != NULL && global_finger < _heap_end) {
2544 // The global finger always points to a heap region boundary. We
2545 // use heap_region_containing() to get the containing region
2546 // given that the global finger could be pointing to a free region
2547 // which subsequently becomes continues humongous. If that
2548 // happens, heap_region_containing() will return the bottom of the
2549 // corresponding starts humongous region and the check below will
2550 // not hold any more.
2551 // Since we always iterate over all regions, we might get a NULL HeapRegion
2552 // here.
2553 HeapRegion* global_hr = _g1h->heap_region_containing(global_finger);
2554 guarantee(global_hr == NULL || global_finger == global_hr->bottom(),
2555 "global finger: " PTR_FORMAT " region: " HR_FORMAT,
2556 p2i(global_finger), HR_FORMAT_PARAMS(global_hr));
2557 }
2558
2559 // Verify the task fingers
2560 assert(parallel_marking_threads() <= _max_worker_id, "sanity");
2561 for (uint i = 0; i < parallel_marking_threads(); ++i) {
2562 CMTask* task = _tasks[i];
2563 HeapWord* task_finger = task->finger();
2564 if (task_finger != NULL && task_finger < _heap_end) {
2565 // See above note on the global finger verification.
2566 HeapRegion* task_hr = _g1h->heap_region_containing(task_finger);
2567 guarantee(task_hr == NULL || task_finger == task_hr->bottom() ||
2568 !task_hr->in_collection_set(),
2569 "task finger: " PTR_FORMAT " region: " HR_FORMAT,
2570 p2i(task_finger), HR_FORMAT_PARAMS(task_hr));
|
789 if (cl.complete()) {
790 clear_all_count_data();
791 }
792
793 // Repeat the asserts from above.
794 guarantee(cmThread()->during_cycle(), "invariant");
795 guarantee(!g1h->collector_state()->mark_in_progress(), "invariant");
796 }
797
798 class CheckBitmapClearHRClosure : public HeapRegionClosure {
799 CMBitMap* _bitmap;
800 bool _error;
801 public:
802 CheckBitmapClearHRClosure(CMBitMap* bitmap) : _bitmap(bitmap) {
803 }
804
805 virtual bool doHeapRegion(HeapRegion* r) {
806 // This closure can be called concurrently to the mutator, so we must make sure
807 // that the result of the getNextMarkedWordAddress() call is compared to the
808 // value passed to it as limit to detect any found bits.
809 // end never changes in G1.
810 HeapWord* end = r->end();
811 return _bitmap->getNextMarkedWordAddress(r->bottom(), end) != end;
812 }
813 };
814
815 bool ConcurrentMark::nextMarkBitmapIsClear() {
816 CheckBitmapClearHRClosure cl(_nextMarkBitMap);
817 _g1h->heap_region_iterate(&cl);
818 return cl.complete();
819 }
820
821 class NoteStartOfMarkHRClosure: public HeapRegionClosure {
822 public:
823 bool doHeapRegion(HeapRegion* r) {
824 r->note_start_of_marking();
825 return false;
826 }
827 };
828
829 void ConcurrentMark::checkpointRootsInitialPre() {
1327
1328 BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
1329 BitMap::idx_t end_idx = _cm->card_bitmap_index_for(obj_end);
1330
1331 // Note: if we're looking at the last region in heap - obj_end
1332 // could be actually just beyond the end of the heap; end_idx
1333 // will then correspond to a (non-existent) card that is also
1334 // just beyond the heap.
1335 if (_g1h->is_in_g1_reserved(obj_end) && !_ct_bs->is_card_aligned(obj_end)) {
1336 // end of object is not card aligned - increment to cover
1337 // all the cards spanned by the object
1338 end_idx += 1;
1339 }
1340
1341 // Set the bits in the card BM for the cards spanned by this object.
1342 _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);
1343
1344 // Add the size of this object to the number of marked bytes.
1345 marked_bytes += (size_t)obj_sz * HeapWordSize;
1346
1347 // This will happen if we are handling a humongous object that spans
1348 // several heap regions.
1349 if (obj_end > hr->end()) {
1350 break;
1351 }
1352 // Find the next marked object after this one.
1353 start = _bm->getNextMarkedWordAddress(obj_end, ntams);
1354 }
1355
1356 // Mark the allocated-since-marking portion...
1357 HeapWord* top = hr->top();
1358 if (ntams < top) {
1359 BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams);
1360 BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top);
1361
1362 // Note: if we're looking at the last region in heap - top
1363 // could be actually just beyond the end of the heap; end_idx
1364 // will then correspond to a (non-existent) card that is also
1365 // just beyond the heap.
1366 if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) {
1367 // end of object is not card aligned - increment to cover
1368 // all the cards spanned by the object
1418 _exp_region_bm(exp_region_bm), _exp_card_bm(exp_card_bm),
1419 _failures(0) { }
1420
1421 int failures() const { return _failures; }
1422
1423 bool doHeapRegion(HeapRegion* hr) {
1424 int failures = 0;
1425
1426 // Call the CalcLiveObjectsClosure to walk the marking bitmap for
1427 // this region and set the corresponding bits in the expected region
1428 // and card bitmaps.
1429 bool res = _calc_cl.doHeapRegion(hr);
1430 assert(res == false, "should be continuing");
1431
1432 // Verify the marked bytes for this region.
1433 size_t exp_marked_bytes = _calc_cl.region_marked_bytes();
1434 size_t act_marked_bytes = hr->next_marked_bytes();
1435
1436 // We're not OK if expected marked bytes > actual marked bytes. It means
1437 // we have missed accounting some objects during the actual marking.
1438 // For start_humongous regions, the size of the whole object will be
1439 // in exp_marked_bytes, so this check does not apply in this case.
1440 if (exp_marked_bytes > act_marked_bytes && !hr->is_starts_humongous()) {
1441 failures += 1;
1442 }
1443
1444 // Verify the bit, for this region, in the actual and expected
1445 // (which was just calculated) region bit maps.
1446 // We're not OK if the bit in the calculated expected region
1447 // bitmap is set and the bit in the actual region bitmap is not.
1448 BitMap::idx_t index = (BitMap::idx_t) hr->hrm_index();
1449
1450 bool expected = _exp_region_bm->at(index);
1451 bool actual = _region_bm->at(index);
1452 if (expected && !actual) {
1453 failures += 1;
1454 }
1455
1456 // Verify that the card bit maps for the cards spanned by the current
1457 // region match. We have an error if we have a set bit in the expected
1458 // bit map and the corresponding bit in the actual bitmap is not set.
1459
1460 BitMap::idx_t start_idx = _cm->card_bitmap_index_for(hr->bottom());
2428 void ConcurrentMark::clearRangePrevBitmap(MemRegion mr) {
2429 // Note we are overriding the read-only view of the prev map here, via
2430 // the cast.
2431 ((CMBitMap*)_prevMarkBitMap)->clearRange(mr);
2432 }
2433
// Clears the given range in the next mark bitmap. The next map is
// writable, so no cast is needed (contrast clearRangePrevBitmap above).
void ConcurrentMark::clearRangeNextBitmap(MemRegion mr) {
  _nextMarkBitMap->clearRange(mr);
}
2437
2438 HeapRegion*
2439 ConcurrentMark::claim_region(uint worker_id) {
2440 // "checkpoint" the finger
2441 HeapWord* finger = _finger;
2442
2443 // _heap_end will not change underneath our feet; it only changes at
2444 // yield points.
2445 while (finger < _heap_end) {
2446 assert(_g1h->is_in_g1_reserved(finger), "invariant");
2447
2448 HeapRegion* curr_region = _g1h->heap_region_containing(finger);
2449
2450 // Above heap_region_containing may return NULL as we always scan claim
2451 // until the end of the heap. In this case, just jump to the next region.
2452 HeapWord* end = curr_region != NULL ? curr_region->end() : finger + HeapRegion::GrainWords;
2453
2454 // Is the gap between reading the finger and doing the CAS too long?
2455 HeapWord* res = (HeapWord*) Atomic::cmpxchg_ptr(end, &_finger, finger);
2456 if (res == finger && curr_region != NULL) {
2457 // we succeeded
2458 HeapWord* bottom = curr_region->bottom();
2459 HeapWord* limit = curr_region->next_top_at_mark_start();
2460
2461 // notice that _finger == end cannot be guaranteed here since,
2462 // someone else might have moved the finger even further
2463 assert(_finger >= end, "the finger should have moved forward");
2464
2465 if (limit > bottom) {
2466 return curr_region;
2467 } else {
2506 };
2507
2508 void ConcurrentMark::verify_no_cset_oops() {
2509 assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
2510 if (!G1CollectedHeap::heap()->collector_state()->mark_in_progress()) {
2511 return;
2512 }
2513
2514 // Verify entries on the global mark stack
2515 _markStack.iterate(VerifyNoCSetOops("Stack"));
2516
2517 // Verify entries on the task queues
2518 for (uint i = 0; i < _max_worker_id; ++i) {
2519 CMTaskQueue* queue = _task_queues->queue(i);
2520 queue->iterate(VerifyNoCSetOops("Queue", i));
2521 }
2522
2523 // Verify the global finger
2524 HeapWord* global_finger = finger();
2525 if (global_finger != NULL && global_finger < _heap_end) {
2526 // Since we always iterate over all regions, we might get a NULL HeapRegion
2527 // here.
2528 HeapRegion* global_hr = _g1h->heap_region_containing(global_finger);
2529 guarantee(global_hr == NULL || global_finger == global_hr->bottom(),
2530 "global finger: " PTR_FORMAT " region: " HR_FORMAT,
2531 p2i(global_finger), HR_FORMAT_PARAMS(global_hr));
2532 }
2533
2534 // Verify the task fingers
2535 assert(parallel_marking_threads() <= _max_worker_id, "sanity");
2536 for (uint i = 0; i < parallel_marking_threads(); ++i) {
2537 CMTask* task = _tasks[i];
2538 HeapWord* task_finger = task->finger();
2539 if (task_finger != NULL && task_finger < _heap_end) {
2540 // See above note on the global finger verification.
2541 HeapRegion* task_hr = _g1h->heap_region_containing(task_finger);
2542 guarantee(task_hr == NULL || task_finger == task_hr->bottom() ||
2543 !task_hr->in_collection_set(),
2544 "task finger: " PTR_FORMAT " region: " HR_FORMAT,
2545 p2i(task_finger), HR_FORMAT_PARAMS(task_hr));
|