789 if (cl.complete()) {
790 clear_all_count_data();
791 }
792
793 // Repeat the asserts from above.
794 guarantee(cmThread()->during_cycle(), "invariant");
795 guarantee(!g1h->collector_state()->mark_in_progress(), "invariant");
796 }
797
798 class CheckBitmapClearHRClosure : public HeapRegionClosure {
799 CMBitMap* _bitmap;
800 bool _error;
801 public:
802 CheckBitmapClearHRClosure(CMBitMap* bitmap) : _bitmap(bitmap) {
803 }
804
805 virtual bool doHeapRegion(HeapRegion* r) {
806 // This closure can be called concurrently to the mutator, so we must make sure
807 // that the result of the getNextMarkedWordAddress() call is compared to the
808 // value passed to it as limit to detect any found bits.
809 // We can use the region's orig_end() for the limit and the comparison value
810 // as it always contains the "real" end of the region that never changes and
811 // has no side effects.
812 // Due to the latter, there can also be no problem with the compiler generating
813 // reloads of the orig_end() call.
814 HeapWord* end = r->orig_end();
815 return _bitmap->getNextMarkedWordAddress(r->bottom(), end) != end;
816 }
817 };
818
819 bool ConcurrentMark::nextMarkBitmapIsClear() {
820 CheckBitmapClearHRClosure cl(_nextMarkBitMap);
821 _g1h->heap_region_iterate(&cl);
822 return cl.complete();
823 }
824
825 class NoteStartOfMarkHRClosure: public HeapRegionClosure {
826 public:
827 bool doHeapRegion(HeapRegion* r) {
828 if (!r->is_continues_humongous()) {
829 r->note_start_of_marking();
830 }
831 return false;
832 }
833 };
834
835 void ConcurrentMark::checkpointRootsInitialPre() {
836 G1CollectedHeap* g1h = G1CollectedHeap::heap();
837 G1CollectorPolicy* g1p = g1h->g1_policy();
838
839 _has_aborted = false;
840
841 // Initialize marking structures. This has to be done in a STW phase.
842 reset();
843
844 // For each region note start of marking.
845 NoteStartOfMarkHRClosure startcl;
846 g1h->heap_region_iterate(&startcl);
847 }
848
849
850 void ConcurrentMark::checkpointRootsInitialPost() {
1269
1270 g1p->record_concurrent_mark_remark_end();
1271
1272 G1CMIsAliveClosure is_alive(g1h);
1273 g1h->gc_tracer_cm()->report_object_count_after_gc(&is_alive);
1274 }
1275
// Base class of the closures that finalize and verify the
// liveness counting data.
class CMCountDataClosureBase: public HeapRegionClosure {
 protected:
  G1CollectedHeap* _g1h;
  ConcurrentMark* _cm;
  CardTableModRefBS* _ct_bs;

  BitMap* _region_bm;  // liveness bitmap, one bit per region
  BitMap* _card_bm;    // liveness bitmap, one bit per card

  // Takes a region that's not empty (i.e., it has at least one
  // live object in it) and sets its corresponding bit on the region
  // bitmap to 1. If the region is "starts humongous" it will also set
  // to 1 the bits on the region bitmap that correspond to its
  // associated "continues humongous" regions.
  void set_bit_for_region(HeapRegion* hr) {
    assert(!hr->is_continues_humongous(), "should have filtered those out");

    BitMap::idx_t index = (BitMap::idx_t) hr->hrm_index();
    if (!hr->is_starts_humongous()) {
      // Normal (non-humongous) case: just set the bit.
      _region_bm->par_at_put(index, true);
    } else {
      // Starts humongous case: calculate how many regions are part of
      // this humongous region and then set the bit range. The parallel
      // (par_*) variants are used as multiple workers may set bits
      // concurrently.
      BitMap::idx_t end_index = (BitMap::idx_t) hr->last_hc_index();
      _region_bm->par_at_put_range(index, end_index, true);
    }
  }

 public:
  CMCountDataClosureBase(G1CollectedHeap* g1h,
                         BitMap* region_bm, BitMap* card_bm):
    _g1h(g1h), _cm(g1h->concurrent_mark()),
    _ct_bs(barrier_set_cast<CardTableModRefBS>(g1h->barrier_set())),
    _region_bm(region_bm), _card_bm(card_bm) { }
};
1314
1315 // Closure that calculates the # live objects per region. Used
1316 // for verification purposes during the cleanup pause.
1317 class CalcLiveObjectsClosure: public CMCountDataClosureBase {
1318 CMBitMapRO* _bm;
1319 size_t _region_marked_bytes;
1320
1321 public:
1322 CalcLiveObjectsClosure(CMBitMapRO *bm, G1CollectedHeap* g1h,
1323 BitMap* region_bm, BitMap* card_bm) :
1324 CMCountDataClosureBase(g1h, region_bm, card_bm),
1325 _bm(bm), _region_marked_bytes(0) { }
1326
1327 bool doHeapRegion(HeapRegion* hr) {
1328
1329 if (hr->is_continues_humongous()) {
1330 // We will ignore these here and process them when their
1331 // associated "starts humongous" region is processed (see
1332 // set_bit_for_heap_region()). Note that we cannot rely on their
1333 // associated "starts humongous" region to have their bit set to
1334 // 1 since, due to the region chunking in the parallel region
1335 // iteration, a "continues humongous" region might be visited
1336 // before its associated "starts humongous".
1337 return false;
1338 }
1339
1340 HeapWord* ntams = hr->next_top_at_mark_start();
1341 HeapWord* start = hr->bottom();
1342
1343 assert(start <= hr->end() && start <= ntams && ntams <= hr->end(),
1344 "Preconditions not met - "
1345 "start: " PTR_FORMAT ", ntams: " PTR_FORMAT ", end: " PTR_FORMAT,
1346 p2i(start), p2i(ntams), p2i(hr->end()));
1347
1348 // Find the first marked object at or after "start".
1349 start = _bm->getNextMarkedWordAddress(start, ntams);
1350
1351 size_t marked_bytes = 0;
1352
1353 while (start < ntams) {
1354 oop obj = oop(start);
1355 int obj_sz = obj->size();
1356 HeapWord* obj_end = start + obj_sz;
1357
1358 BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
1359 BitMap::idx_t end_idx = _cm->card_bitmap_index_for(obj_end);
1360
1361 // Note: if we're looking at the last region in heap - obj_end
1362 // could be actually just beyond the end of the heap; end_idx
1363 // will then correspond to a (non-existent) card that is also
1364 // just beyond the heap.
1365 if (_g1h->is_in_g1_reserved(obj_end) && !_ct_bs->is_card_aligned(obj_end)) {
1366 // end of object is not card aligned - increment to cover
1367 // all the cards spanned by the object
1368 end_idx += 1;
1369 }
1370
1371 // Set the bits in the card BM for the cards spanned by this object.
1372 _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);
1373
1374 // Add the size of this object to the number of marked bytes.
1375 marked_bytes += (size_t)obj_sz * HeapWordSize;
1376
1377 // Find the next marked object after this one.
1378 start = _bm->getNextMarkedWordAddress(obj_end, ntams);
1379 }
1380
1381 // Mark the allocated-since-marking portion...
1382 HeapWord* top = hr->top();
1383 if (ntams < top) {
1384 BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams);
1385 BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top);
1386
1387 // Note: if we're looking at the last region in heap - top
1388 // could be actually just beyond the end of the heap; end_idx
1389 // will then correspond to a (non-existent) card that is also
1390 // just beyond the heap.
1391 if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) {
1392 // end of object is not card aligned - increment to cover
1393 // all the cards spanned by the object
1394 end_idx += 1;
1395 }
1396 _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);
1429 BitMap* _exp_region_bm; // Expected Region BM values
1430 BitMap* _exp_card_bm; // Expected card BM values
1431
1432 int _failures;
1433
1434 public:
  // region_bm/card_bm are the actual liveness bitmaps to verify;
  // exp_region_bm/exp_card_bm receive the expected values, recomputed
  // from the next mark bitmap by the embedded CalcLiveObjectsClosure.
  VerifyLiveObjectDataHRClosure(G1CollectedHeap* g1h,
                                BitMap* region_bm,
                                BitMap* card_bm,
                                BitMap* exp_region_bm,
                                BitMap* exp_card_bm) :
    _g1h(g1h), _cm(g1h->concurrent_mark()),
    _calc_cl(_cm->nextMarkBitMap(), g1h, exp_region_bm, exp_card_bm),
    _region_bm(region_bm), _card_bm(card_bm),
    _exp_region_bm(exp_region_bm), _exp_card_bm(exp_card_bm),
    _failures(0) { }
1445
1446 int failures() const { return _failures; }
1447
1448 bool doHeapRegion(HeapRegion* hr) {
1449 if (hr->is_continues_humongous()) {
1450 // We will ignore these here and process them when their
1451 // associated "starts humongous" region is processed (see
1452 // set_bit_for_heap_region()). Note that we cannot rely on their
1453 // associated "starts humongous" region to have their bit set to
1454 // 1 since, due to the region chunking in the parallel region
1455 // iteration, a "continues humongous" region might be visited
1456 // before its associated "starts humongous".
1457 return false;
1458 }
1459
1460 int failures = 0;
1461
1462 // Call the CalcLiveObjectsClosure to walk the marking bitmap for
1463 // this region and set the corresponding bits in the expected region
1464 // and card bitmaps.
1465 bool res = _calc_cl.doHeapRegion(hr);
1466 assert(res == false, "should be continuing");
1467
1468 // Verify the marked bytes for this region.
1469 size_t exp_marked_bytes = _calc_cl.region_marked_bytes();
1470 size_t act_marked_bytes = hr->next_marked_bytes();
1471
1472 // We're not OK if expected marked bytes > actual marked bytes. It means
1473 // we have missed accounting some objects during the actual marking.
1474 if (exp_marked_bytes > act_marked_bytes) {
1475 failures += 1;
1476 }
1477
1478 // Verify the bit, for this region, in the actual and expected
1479 // (which was just calculated) region bit maps.
1480 // We're not OK if the bit in the calculated expected region
1481 // bitmap is set and the bit in the actual region bitmap is not.
1482 BitMap::idx_t index = (BitMap::idx_t) hr->hrm_index();
1483
1484 bool expected = _exp_region_bm->at(index);
1485 bool actual = _region_bm->at(index);
1486 if (expected && !actual) {
1487 failures += 1;
1488 }
1489
1490 // Verify that the card bit maps for the cards spanned by the current
1491 // region match. We have an error if we have a set bit in the expected
1492 // bit map and the corresponding bit in the actual bitmap is not set.
1493
1494 BitMap::idx_t start_idx = _cm->card_bitmap_index_for(hr->bottom());
1495 BitMap::idx_t end_idx = _cm->card_bitmap_index_for(hr->top());
1496
1556 }
1557
1558 int failures() const { return _failures; }
1559 };
1560
1561 // Closure that finalizes the liveness counting data.
1562 // Used during the cleanup pause.
1563 // Sets the bits corresponding to the interval [NTAMS, top]
1564 // (which contains the implicitly live objects) in the
1565 // card liveness bitmap. Also sets the bit for each region,
1566 // containing live data, in the region liveness bitmap.
1567
1568 class FinalCountDataUpdateClosure: public CMCountDataClosureBase {
1569 public:
1570 FinalCountDataUpdateClosure(G1CollectedHeap* g1h,
1571 BitMap* region_bm,
1572 BitMap* card_bm) :
1573 CMCountDataClosureBase(g1h, region_bm, card_bm) { }
1574
1575 bool doHeapRegion(HeapRegion* hr) {
1576
1577 if (hr->is_continues_humongous()) {
1578 // We will ignore these here and process them when their
1579 // associated "starts humongous" region is processed (see
1580 // set_bit_for_heap_region()). Note that we cannot rely on their
1581 // associated "starts humongous" region to have their bit set to
1582 // 1 since, due to the region chunking in the parallel region
1583 // iteration, a "continues humongous" region might be visited
1584 // before its associated "starts humongous".
1585 return false;
1586 }
1587
1588 HeapWord* ntams = hr->next_top_at_mark_start();
1589 HeapWord* top = hr->top();
1590
1591 assert(hr->bottom() <= ntams && ntams <= hr->end(), "Preconditions.");
1592
1593 // Mark the allocated-since-marking portion...
1594 if (ntams < top) {
1595 // This definitely means the region has live objects.
1596 set_bit_for_region(hr);
1597
1598 // Now set the bits in the card bitmap for [ntams, top)
1599 BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams);
1600 BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top);
1601
1602 // Note: if we're looking at the last region in heap - top
1603 // could be actually just beyond the end of the heap; end_idx
1604 // will then correspond to a (non-existent) card that is also
1605 // just beyond the heap.
1606 if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) {
1607 // end of object is not card aligned - increment to cover
1664 HeapRegionSetCount _old_regions_removed;
1665 HeapRegionSetCount _humongous_regions_removed;
1666 HRRSCleanupTask* _hrrs_cleanup_task;
1667
1668 public:
  // Freed regions are appended to local_cleanup_list; remembered-set
  // cleanup work for surviving regions is queued on hrrs_cleanup_task.
  G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1,
                             FreeRegionList* local_cleanup_list,
                             HRRSCleanupTask* hrrs_cleanup_task) :
    _g1(g1),
    _freed_bytes(0),
    _local_cleanup_list(local_cleanup_list),
    _old_regions_removed(),
    _humongous_regions_removed(),
    _hrrs_cleanup_task(hrrs_cleanup_task) { }
1678
1679 size_t freed_bytes() { return _freed_bytes; }
1680 const HeapRegionSetCount& old_regions_removed() { return _old_regions_removed; }
1681 const HeapRegionSetCount& humongous_regions_removed() { return _humongous_regions_removed; }
1682
  // Per-region end-of-marking work: note the end of marking and free
  // the region if marking found it completely empty. Always returns
  // false so that the iteration continues.
  bool doHeapRegion(HeapRegion *hr) {
    if (hr->is_continues_humongous() || hr->is_archive()) {
      return false;
    }
    // We use a claim value of zero here because all regions
    // were claimed with value 1 in the FinalCount task.
    _g1->reset_gc_time_stamps(hr);
    hr->note_end_of_marking();

    // A region is reclaimable here if it has been allocated into but
    // marking found no live bytes in it, and it is not young.
    if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young()) {
      _freed_bytes += hr->used();
      hr->set_containing_set(NULL);
      if (hr->is_humongous()) {
        assert(hr->is_starts_humongous(), "we should only see starts humongous");
        _humongous_regions_removed.increment(1u, hr->capacity());
        _g1->free_humongous_region(hr, _local_cleanup_list, true);
      } else {
        _old_regions_removed.increment(1u, hr->capacity());
        _g1->free_region(hr, _local_cleanup_list, true);
      }
    } else {
      // Region survives: queue its remembered set for cleanup work.
      hr->rem_set()->do_cleanup_work(_hrrs_cleanup_task);
    }

    return false;
  }
1709 };
1710
1711 class G1ParNoteEndTask: public AbstractGangTask {
1712 friend class G1NoteEndOfConcMarkClosure;
1713
1714 protected:
1715 G1CollectedHeap* _g1h;
1716 FreeRegionList* _cleanup_list;
2325 }
2326 }
2327
2328 void ConcurrentMark::swapMarkBitMaps() {
2329 CMBitMapRO* temp = _prevMarkBitMap;
2330 _prevMarkBitMap = (CMBitMapRO*)_nextMarkBitMap;
2331 _nextMarkBitMap = (CMBitMap*) temp;
2332 }
2333
// Closure for marking entries in SATB buffers.
class CMSATBBufferClosure : public SATBBufferClosure {
 private:
  CMTask* _task;
  G1CollectedHeap* _g1h;

  // This is very similar to CMTask::deal_with_reference, but with
  // more relaxed requirements for the argument, so this must be more
  // circumspect about treating the argument as an object.
  void do_entry(void* entry) const {
    _task->increment_refs_reached();
    HeapRegion* hr = _g1h->heap_region_containing_raw(entry);
    // Only entries below the region's NTAMS are treated as objects;
    // anything at or above it is ignored here.
    if (entry < hr->next_top_at_mark_start()) {
      // Until we get here, we don't know whether entry refers to a valid
      // object; it could instead have been a stale reference.
      oop obj = static_cast<oop>(entry);
      assert(obj->is_oop(true /* ignore mark word */),
             "Invalid oop in SATB buffer: " PTR_FORMAT, p2i(obj));
      _task->make_reference_grey(obj, hr);
    }
  }

 public:
  CMSATBBufferClosure(CMTask* task, G1CollectedHeap* g1h)
    : _task(task), _g1h(g1h) { }

  // Process each recorded entry of the given SATB buffer in order.
  virtual void do_buffer(void** buffer, size_t size) {
    for (size_t i = 0; i < size; ++i) {
      do_entry(buffer[i]);
    }
  }
};
2475 void ConcurrentMark::clearRangePrevBitmap(MemRegion mr) {
2476 // Note we are overriding the read-only view of the prev map here, via
2477 // the cast.
2478 ((CMBitMap*)_prevMarkBitMap)->clearRange(mr);
2479 }
2480
// Clear the given memory range in the next mark bitmap.
void ConcurrentMark::clearRangeNextBitmap(MemRegion mr) {
  _nextMarkBitMap->clearRange(mr);
}
2484
2485 HeapRegion*
2486 ConcurrentMark::claim_region(uint worker_id) {
2487 // "checkpoint" the finger
2488 HeapWord* finger = _finger;
2489
2490 // _heap_end will not change underneath our feet; it only changes at
2491 // yield points.
2492 while (finger < _heap_end) {
2493 assert(_g1h->is_in_g1_reserved(finger), "invariant");
2494
2495 // Note on how this code handles humongous regions. In the
2496 // normal case the finger will reach the start of a "starts
2497 // humongous" (SH) region. Its end will either be the end of the
2498 // last "continues humongous" (CH) region in the sequence, or the
2499 // standard end of the SH region (if the SH is the only region in
2500 // the sequence). That way claim_region() will skip over the CH
2501 // regions. However, there is a subtle race between a CM thread
2502 // executing this method and a mutator thread doing a humongous
2503 // object allocation. The two are not mutually exclusive as the CM
2504 // thread does not need to hold the Heap_lock when it gets
2505 // here. So there is a chance that claim_region() will come across
2506 // a free region that's in the progress of becoming a SH or a CH
2507 // region. In the former case, it will either
2508 // a) Miss the update to the region's end, in which case it will
2509 // visit every subsequent CH region, will find their bitmaps
2510 // empty, and do nothing, or
2511 // b) Will observe the update of the region's end (in which case
2512 // it will skip the subsequent CH regions).
2513 // If it comes across a region that suddenly becomes CH, the
2514 // scenario will be similar to b). So, the race between
2515 // claim_region() and a humongous object allocation might force us
2516 // to do a bit of unnecessary work (due to some unnecessary bitmap
2517 // iterations) but it should not introduce and correctness issues.
2518 HeapRegion* curr_region = _g1h->heap_region_containing_raw(finger);
2519
2520 // Above heap_region_containing_raw may return NULL as we always scan claim
2521 // until the end of the heap. In this case, just jump to the next region.
2522 HeapWord* end = curr_region != NULL ? curr_region->end() : finger + HeapRegion::GrainWords;
2523
2524 // Is the gap between reading the finger and doing the CAS too long?
2525 HeapWord* res = (HeapWord*) Atomic::cmpxchg_ptr(end, &_finger, finger);
2526 if (res == finger && curr_region != NULL) {
2527 // we succeeded
2528 HeapWord* bottom = curr_region->bottom();
2529 HeapWord* limit = curr_region->next_top_at_mark_start();
2530
2531 // notice that _finger == end cannot be guaranteed here since,
2532 // someone else might have moved the finger even further
2533 assert(_finger >= end, "the finger should have moved forward");
2534
2535 if (limit > bottom) {
2536 return curr_region;
2537 } else {
2538 assert(limit == bottom,
2539 "the region limit should be at bottom");
2540 // we return NULL and the caller should try calling
2576 };
2577
// Sanity check run at a safepoint while marking is in progress (the
// trailing '#endif // PRODUCT' shows this is compiled out of PRODUCT
// builds): verifies that the global mark stack, the per-worker task
// queues, and the global/per-task fingers do not refer into regions
// they must not (see the guarantees below).
void ConcurrentMark::verify_no_cset_oops() {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
  if (!G1CollectedHeap::heap()->collector_state()->mark_in_progress()) {
    // Nothing to verify outside of a marking cycle.
    return;
  }

  // Verify entries on the global mark stack
  _markStack.iterate(VerifyNoCSetOops("Stack"));

  // Verify entries on the task queues
  for (uint i = 0; i < _max_worker_id; ++i) {
    CMTaskQueue* queue = _task_queues->queue(i);
    queue->iterate(VerifyNoCSetOops("Queue", i));
  }

  // Verify the global finger
  HeapWord* global_finger = finger();
  if (global_finger != NULL && global_finger < _heap_end) {
    // The global finger always points to a heap region boundary. We
    // use heap_region_containing_raw() to get the containing region
    // given that the global finger could be pointing to a free region
    // which subsequently becomes continues humongous. If that
    // happens, heap_region_containing() will return the bottom of the
    // corresponding starts humongous region and the check below will
    // not hold any more.
    // Since we always iterate over all regions, we might get a NULL HeapRegion
    // here.
    HeapRegion* global_hr = _g1h->heap_region_containing_raw(global_finger);
    guarantee(global_hr == NULL || global_finger == global_hr->bottom(),
              "global finger: " PTR_FORMAT " region: " HR_FORMAT,
              p2i(global_finger), HR_FORMAT_PARAMS(global_hr));
  }

  // Verify the task fingers
  assert(parallel_marking_threads() <= _max_worker_id, "sanity");
  for (uint i = 0; i < parallel_marking_threads(); ++i) {
    CMTask* task = _tasks[i];
    HeapWord* task_finger = task->finger();
    if (task_finger != NULL && task_finger < _heap_end) {
      // See above note on the global finger verification.
      HeapRegion* task_hr = _g1h->heap_region_containing_raw(task_finger);
      guarantee(task_hr == NULL || task_finger == task_hr->bottom() ||
                !task_hr->in_collection_set(),
                "task finger: " PTR_FORMAT " region: " HR_FORMAT,
                p2i(task_finger), HR_FORMAT_PARAMS(task_hr));
    }
  }
}
2626 #endif // PRODUCT
2627
2628 // Aggregate the counting data that was constructed concurrently
2629 // with marking.
2630 class AggregateCountDataHRClosure: public HeapRegionClosure {
2631 G1CollectedHeap* _g1h;
2632 ConcurrentMark* _cm;
2633 CardTableModRefBS* _ct_bs;
2634 BitMap* _cm_card_bm;
2635 uint _max_worker_id;
2636
2637 public:
2638 AggregateCountDataHRClosure(G1CollectedHeap* g1h,
2639 BitMap* cm_card_bm,
2640 uint max_worker_id) :
2641 _g1h(g1h), _cm(g1h->concurrent_mark()),
2642 _ct_bs(barrier_set_cast<CardTableModRefBS>(g1h->barrier_set())),
2643 _cm_card_bm(cm_card_bm), _max_worker_id(max_worker_id) { }
2644
2645 bool doHeapRegion(HeapRegion* hr) {
2646 if (hr->is_continues_humongous()) {
2647 // We will ignore these here and process them when their
2648 // associated "starts humongous" region is processed.
2649 // Note that we cannot rely on their associated
2650 // "starts humongous" region to have their bit set to 1
2651 // since, due to the region chunking in the parallel region
2652 // iteration, a "continues humongous" region might be visited
2653 // before its associated "starts humongous".
2654 return false;
2655 }
2656
2657 HeapWord* start = hr->bottom();
2658 HeapWord* limit = hr->next_top_at_mark_start();
2659 HeapWord* end = hr->end();
2660
2661 assert(start <= limit && limit <= hr->top() && hr->top() <= hr->end(),
2662 "Preconditions not met - "
2663 "start: " PTR_FORMAT ", limit: " PTR_FORMAT ", "
2664 "top: " PTR_FORMAT ", end: " PTR_FORMAT,
2665 p2i(start), p2i(limit), p2i(hr->top()), p2i(hr->end()));
2666
2667 assert(hr->next_marked_bytes() == 0, "Precondition");
2668
2669 if (start == limit) {
2670 // NTAMS of this region has not been set so nothing to do.
2671 return false;
2672 }
2673
2674 // 'start' should be in the heap.
2675 assert(_g1h->is_in_g1_reserved(start) && _ct_bs->is_card_aligned(start), "sanity");
2676 // 'end' *may* be just beyond the end of the heap (if hr is the last region)
2944
2945 static ReferenceProcessor* get_cm_oop_closure_ref_processor(G1CollectedHeap* g1h) {
2946 ReferenceProcessor* result = NULL;
2947 if (G1UseConcMarkReferenceProcessing) {
2948 result = g1h->ref_processor_cm();
2949 assert(result != NULL, "should not be NULL");
2950 }
2951 return result;
2952 }
2953
// Passes the concurrent-marking reference processor (NULL when
// G1UseConcMarkReferenceProcessing is off) up to the
// MetadataAwareOopClosure base.
G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h,
                               ConcurrentMark* cm,
                               CMTask* task)
  : MetadataAwareOopClosure(get_cm_oop_closure_ref_processor(g1h)),
    _g1h(g1h), _cm(cm), _task(task)
{ }
2960
// Prepare this task to scan the given (claimed) region: record it as the
// current region, position the task-local finger at its bottom, and
// compute the region's scan limit.
void CMTask::setup_for_region(HeapRegion* hr) {
  assert(hr != NULL,
         "claim_region() should have filtered out NULL regions");
  assert(!hr->is_continues_humongous(),
         "claim_region() should have filtered out continues humongous regions");
  _curr_region = hr;
  _finger = hr->bottom();
  update_region_limit();
}
2970
2971 void CMTask::update_region_limit() {
2972 HeapRegion* hr = _curr_region;
2973 HeapWord* bottom = hr->bottom();
2974 HeapWord* limit = hr->next_top_at_mark_start();
2975
2976 if (limit == bottom) {
2977 // The region was collected underneath our feet.
2978 // We set the finger to bottom to ensure that the bitmap
2979 // iteration that will follow this will not do anything.
2980 // (this is not a condition that holds when we set the region up,
2981 // as the region is not supposed to be empty in the first place)
2982 _finger = bottom;
2983 } else if (limit >= _region_limit) {
2984 assert(limit >= _finger, "peace of mind");
2985 } else {
|
789 if (cl.complete()) {
790 clear_all_count_data();
791 }
792
793 // Repeat the asserts from above.
794 guarantee(cmThread()->during_cycle(), "invariant");
795 guarantee(!g1h->collector_state()->mark_in_progress(), "invariant");
796 }
797
798 class CheckBitmapClearHRClosure : public HeapRegionClosure {
799 CMBitMap* _bitmap;
800 bool _error;
801 public:
802 CheckBitmapClearHRClosure(CMBitMap* bitmap) : _bitmap(bitmap) {
803 }
804
805 virtual bool doHeapRegion(HeapRegion* r) {
806 // This closure can be called concurrently to the mutator, so we must make sure
807 // that the result of the getNextMarkedWordAddress() call is compared to the
808 // value passed to it as limit to detect any found bits.
809 // end never changes in G1.
810 HeapWord* end = r->end();
811 return _bitmap->getNextMarkedWordAddress(r->bottom(), end) != end;
812 }
813 };
814
815 bool ConcurrentMark::nextMarkBitmapIsClear() {
816 CheckBitmapClearHRClosure cl(_nextMarkBitMap);
817 _g1h->heap_region_iterate(&cl);
818 return cl.complete();
819 }
820
// Closure applied to every region at the start of marking; lets each
// region note that marking has started.
class NoteStartOfMarkHRClosure: public HeapRegionClosure {
 public:
  // Always returns false so the heap-region iteration continues.
  bool doHeapRegion(HeapRegion* r) {
    r->note_start_of_marking();
    return false;
  }
};
828
829 void ConcurrentMark::checkpointRootsInitialPre() {
830 G1CollectedHeap* g1h = G1CollectedHeap::heap();
831 G1CollectorPolicy* g1p = g1h->g1_policy();
832
833 _has_aborted = false;
834
835 // Initialize marking structures. This has to be done in a STW phase.
836 reset();
837
838 // For each region note start of marking.
839 NoteStartOfMarkHRClosure startcl;
840 g1h->heap_region_iterate(&startcl);
841 }
842
843
844 void ConcurrentMark::checkpointRootsInitialPost() {
1263
1264 g1p->record_concurrent_mark_remark_end();
1265
1266 G1CMIsAliveClosure is_alive(g1h);
1267 g1h->gc_tracer_cm()->report_object_count_after_gc(&is_alive);
1268 }
1269
// Base class of the closures that finalize and verify the
// liveness counting data.
class CMCountDataClosureBase: public HeapRegionClosure {
 protected:
  G1CollectedHeap* _g1h;
  ConcurrentMark* _cm;
  CardTableModRefBS* _ct_bs;

  BitMap* _region_bm;  // liveness bitmap, one bit per region
  BitMap* _card_bm;    // liveness bitmap, one bit per card

  // Takes a region that's not empty (i.e., it has at least one
  // live object in it) and sets its corresponding bit on the region
  // bitmap to 1. The par_at_put variant is used as multiple workers
  // may set bits concurrently.
  void set_bit_for_region(HeapRegion* hr) {
    BitMap::idx_t index = (BitMap::idx_t) hr->hrm_index();
    _region_bm->par_at_put(index, true);
  }

 public:
  CMCountDataClosureBase(G1CollectedHeap* g1h,
                         BitMap* region_bm, BitMap* card_bm):
    _g1h(g1h), _cm(g1h->concurrent_mark()),
    _ct_bs(barrier_set_cast<CardTableModRefBS>(g1h->barrier_set())),
    _region_bm(region_bm), _card_bm(card_bm) { }
};
1296
1297 // Closure that calculates the # live objects per region. Used
1298 // for verification purposes during the cleanup pause.
1299 class CalcLiveObjectsClosure: public CMCountDataClosureBase {
1300 CMBitMapRO* _bm;
1301 size_t _region_marked_bytes;
1302
1303 public:
1304 CalcLiveObjectsClosure(CMBitMapRO *bm, G1CollectedHeap* g1h,
1305 BitMap* region_bm, BitMap* card_bm) :
1306 CMCountDataClosureBase(g1h, region_bm, card_bm),
1307 _bm(bm), _region_marked_bytes(0) { }
1308
1309 bool doHeapRegion(HeapRegion* hr) {
1310 HeapWord* ntams = hr->next_top_at_mark_start();
1311 HeapWord* start = hr->bottom();
1312
1313 assert(start <= hr->end() && start <= ntams && ntams <= hr->end(),
1314 "Preconditions not met - "
1315 "start: " PTR_FORMAT ", ntams: " PTR_FORMAT ", end: " PTR_FORMAT,
1316 p2i(start), p2i(ntams), p2i(hr->end()));
1317
1318 // Find the first marked object at or after "start".
1319 start = _bm->getNextMarkedWordAddress(start, ntams);
1320
1321 size_t marked_bytes = 0;
1322
1323 while (start < ntams) {
1324 oop obj = oop(start);
1325 int obj_sz = obj->size();
1326 HeapWord* obj_end = start + obj_sz;
1327
1328 BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
1329 BitMap::idx_t end_idx = _cm->card_bitmap_index_for(obj_end);
1330
1331 // Note: if we're looking at the last region in heap - obj_end
1332 // could be actually just beyond the end of the heap; end_idx
1333 // will then correspond to a (non-existent) card that is also
1334 // just beyond the heap.
1335 if (_g1h->is_in_g1_reserved(obj_end) && !_ct_bs->is_card_aligned(obj_end)) {
1336 // end of object is not card aligned - increment to cover
1337 // all the cards spanned by the object
1338 end_idx += 1;
1339 }
1340
1341 // Set the bits in the card BM for the cards spanned by this object.
1342 _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);
1343
1344 // Add the size of this object to the number of marked bytes.
1345 marked_bytes += (size_t)obj_sz * HeapWordSize;
1346
1347 // This will happen if we are handling a humongous object that spans
1348 // several heap regions.
1349 if (obj_end > hr->end()) {
1350 break;
1351 }
1352 // Find the next marked object after this one.
1353 start = _bm->getNextMarkedWordAddress(obj_end, ntams);
1354 }
1355
1356 // Mark the allocated-since-marking portion...
1357 HeapWord* top = hr->top();
1358 if (ntams < top) {
1359 BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams);
1360 BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top);
1361
1362 // Note: if we're looking at the last region in heap - top
1363 // could be actually just beyond the end of the heap; end_idx
1364 // will then correspond to a (non-existent) card that is also
1365 // just beyond the heap.
1366 if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) {
1367 // end of object is not card aligned - increment to cover
1368 // all the cards spanned by the object
1369 end_idx += 1;
1370 }
1371 _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);
1404 BitMap* _exp_region_bm; // Expected Region BM values
1405 BitMap* _exp_card_bm; // Expected card BM values
1406
1407 int _failures;
1408
1409 public:
  // region_bm/card_bm are the actual liveness bitmaps to verify;
  // exp_region_bm/exp_card_bm receive the expected values, recomputed
  // from the next mark bitmap by the embedded CalcLiveObjectsClosure.
  VerifyLiveObjectDataHRClosure(G1CollectedHeap* g1h,
                                BitMap* region_bm,
                                BitMap* card_bm,
                                BitMap* exp_region_bm,
                                BitMap* exp_card_bm) :
    _g1h(g1h), _cm(g1h->concurrent_mark()),
    _calc_cl(_cm->nextMarkBitMap(), g1h, exp_region_bm, exp_card_bm),
    _region_bm(region_bm), _card_bm(card_bm),
    _exp_region_bm(exp_region_bm), _exp_card_bm(exp_card_bm),
    _failures(0) { }
1420
1421 int failures() const { return _failures; }
1422
1423 bool doHeapRegion(HeapRegion* hr) {
1424 int failures = 0;
1425
1426 // Call the CalcLiveObjectsClosure to walk the marking bitmap for
1427 // this region and set the corresponding bits in the expected region
1428 // and card bitmaps.
1429 bool res = _calc_cl.doHeapRegion(hr);
1430 assert(res == false, "should be continuing");
1431
1432 // Verify the marked bytes for this region.
1433 size_t exp_marked_bytes = _calc_cl.region_marked_bytes();
1434 size_t act_marked_bytes = hr->next_marked_bytes();
1435
1436 if (exp_marked_bytes > act_marked_bytes) {
1437 if (hr->is_starts_humongous()) {
1438 // For start_humongous regions, the size of the whole object will be
1439 // in exp_marked_bytes.
1440 HeapRegion* region = hr;
1441 int num_regions;
1442 for (num_regions = 0; region != NULL; num_regions++) {
1443 region = _g1h->next_region_in_humongous(region);
1444 }
1445 if ((num_regions-1) * HeapRegion::GrainBytes >= exp_marked_bytes) {
1446 failures += 1;
1447 } else if (num_regions * HeapRegion::GrainBytes < exp_marked_bytes) {
1448 failures += 1;
1449 }
1450 } else {
1451 // We're not OK if expected marked bytes > actual marked bytes. It means
1452 // we have missed accounting some objects during the actual marking.
1453 failures += 1;
1454 }
1455 }
1456
1457 // Verify the bit, for this region, in the actual and expected
1458 // (which was just calculated) region bit maps.
1459 // We're not OK if the bit in the calculated expected region
1460 // bitmap is set and the bit in the actual region bitmap is not.
1461 BitMap::idx_t index = (BitMap::idx_t) hr->hrm_index();
1462
1463 bool expected = _exp_region_bm->at(index);
1464 bool actual = _region_bm->at(index);
1465 if (expected && !actual) {
1466 failures += 1;
1467 }
1468
1469 // Verify that the card bit maps for the cards spanned by the current
1470 // region match. We have an error if we have a set bit in the expected
1471 // bit map and the corresponding bit in the actual bitmap is not set.
1472
1473 BitMap::idx_t start_idx = _cm->card_bitmap_index_for(hr->bottom());
1474 BitMap::idx_t end_idx = _cm->card_bitmap_index_for(hr->top());
1475
1535 }
1536
1537 int failures() const { return _failures; }
1538 };
1539
1540 // Closure that finalizes the liveness counting data.
1541 // Used during the cleanup pause.
1542 // Sets the bits corresponding to the interval [NTAMS, top]
1543 // (which contains the implicitly live objects) in the
// card liveness bitmap. Also sets, in the region liveness bitmap,
// the bit for each region that contains live data.
1546
1547 class FinalCountDataUpdateClosure: public CMCountDataClosureBase {
1548 public:
  // Simply delegates to the base class, which stores the heap, the
  // ConcurrentMark instance, and the two counting bitmaps for use by
  // doHeapRegion().
  FinalCountDataUpdateClosure(G1CollectedHeap* g1h,
                              BitMap* region_bm,
                              BitMap* card_bm) :
    CMCountDataClosureBase(g1h, region_bm, card_bm) { }
1553
1554 bool doHeapRegion(HeapRegion* hr) {
1555 HeapWord* ntams = hr->next_top_at_mark_start();
1556 HeapWord* top = hr->top();
1557
1558 assert(hr->bottom() <= ntams && ntams <= hr->end(), "Preconditions.");
1559
1560 // Mark the allocated-since-marking portion...
1561 if (ntams < top) {
1562 // This definitely means the region has live objects.
1563 set_bit_for_region(hr);
1564
1565 // Now set the bits in the card bitmap for [ntams, top)
1566 BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams);
1567 BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top);
1568
1569 // Note: if we're looking at the last region in heap - top
1570 // could be actually just beyond the end of the heap; end_idx
1571 // will then correspond to a (non-existent) card that is also
1572 // just beyond the heap.
1573 if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) {
1574 // end of object is not card aligned - increment to cover
  HeapRegionSetCount _old_regions_removed;       // Count and capacity of old regions freed
  HeapRegionSetCount _humongous_regions_removed; // Count and capacity of humongous regions freed
  HRRSCleanupTask* _hrrs_cleanup_task;           // Accumulates rem-set cleanup work for
                                                 // regions that are not freed (see doHeapRegion)

public:
  G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1,
                             FreeRegionList* local_cleanup_list,
                             HRRSCleanupTask* hrrs_cleanup_task) :
    _g1(g1),
    _freed_bytes(0),
    _local_cleanup_list(local_cleanup_list),
    _old_regions_removed(),
    _humongous_regions_removed(),
    _hrrs_cleanup_task(hrrs_cleanup_task) { }

  // Total number of bytes reclaimed from the regions freed by doHeapRegion().
  size_t freed_bytes() { return _freed_bytes; }
  const HeapRegionSetCount& old_regions_removed() { return _old_regions_removed; }
  const HeapRegionSetCount& humongous_regions_removed() { return _humongous_regions_removed; }
1649
1650 bool doHeapRegion(HeapRegion *hr) {
1651 if (hr->is_archive()) {
1652 return false;
1653 }
1654 // We use a claim value of zero here because all regions
1655 // were claimed with value 1 in the FinalCount task.
1656 _g1->reset_gc_time_stamps(hr);
1657 hr->note_end_of_marking();
1658
1659 if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young()) {
1660 _freed_bytes += hr->used();
1661 hr->set_containing_set(NULL);
1662 if (hr->is_humongous()) {
1663 _humongous_regions_removed.increment(1u, hr->capacity());
1664 _g1->free_humongous_region(hr, _local_cleanup_list, true);
1665 } else {
1666 _old_regions_removed.increment(1u, hr->capacity());
1667 _g1->free_region(hr, _local_cleanup_list, true);
1668 }
1669 } else {
1670 hr->rem_set()->do_cleanup_work(_hrrs_cleanup_task);
1671 }
1672
1673 return false;
1674 }
1675 };
1676
1677 class G1ParNoteEndTask: public AbstractGangTask {
1678 friend class G1NoteEndOfConcMarkClosure;
1679
1680 protected:
1681 G1CollectedHeap* _g1h;
1682 FreeRegionList* _cleanup_list;
2291 }
2292 }
2293
2294 void ConcurrentMark::swapMarkBitMaps() {
2295 CMBitMapRO* temp = _prevMarkBitMap;
2296 _prevMarkBitMap = (CMBitMapRO*)_nextMarkBitMap;
2297 _nextMarkBitMap = (CMBitMap*) temp;
2298 }
2299
2300 // Closure for marking entries in SATB buffers.
2301 class CMSATBBufferClosure : public SATBBufferClosure {
2302 private:
2303 CMTask* _task;
2304 G1CollectedHeap* _g1h;
2305
2306 // This is very similar to CMTask::deal_with_reference, but with
2307 // more relaxed requirements for the argument, so this must be more
2308 // circumspect about treating the argument as an object.
2309 void do_entry(void* entry) const {
2310 _task->increment_refs_reached();
2311 HeapRegion* hr = _g1h->heap_region_containing(entry);
2312 if (entry < hr->next_top_at_mark_start()) {
2313 // Until we get here, we don't know whether entry refers to a valid
2314 // object; it could instead have been a stale reference.
2315 oop obj = static_cast<oop>(entry);
2316 assert(obj->is_oop(true /* ignore mark word */),
2317 "Invalid oop in SATB buffer: " PTR_FORMAT, p2i(obj));
2318 _task->make_reference_grey(obj, hr);
2319 }
2320 }
2321
2322 public:
2323 CMSATBBufferClosure(CMTask* task, G1CollectedHeap* g1h)
2324 : _task(task), _g1h(g1h) { }
2325
2326 virtual void do_buffer(void** buffer, size_t size) {
2327 for (size_t i = 0; i < size; ++i) {
2328 do_entry(buffer[i]);
2329 }
2330 }
2331 };
2441 void ConcurrentMark::clearRangePrevBitmap(MemRegion mr) {
2442 // Note we are overriding the read-only view of the prev map here, via
2443 // the cast.
2444 ((CMBitMap*)_prevMarkBitMap)->clearRange(mr);
2445 }
2446
// Clears the given memory range in the next mark bitmap.
void ConcurrentMark::clearRangeNextBitmap(MemRegion mr) {
  _nextMarkBitMap->clearRange(mr);
}
2450
2451 HeapRegion*
2452 ConcurrentMark::claim_region(uint worker_id) {
2453 // "checkpoint" the finger
2454 HeapWord* finger = _finger;
2455
2456 // _heap_end will not change underneath our feet; it only changes at
2457 // yield points.
2458 while (finger < _heap_end) {
2459 assert(_g1h->is_in_g1_reserved(finger), "invariant");
2460
2461 HeapRegion* curr_region = _g1h->heap_region_containing(finger);
2462
2463 // Above heap_region_containing may return NULL as we always scan claim
2464 // until the end of the heap. In this case, just jump to the next region.
2465 HeapWord* end = curr_region != NULL ? curr_region->end() : finger + HeapRegion::GrainWords;
2466
2467 // Is the gap between reading the finger and doing the CAS too long?
2468 HeapWord* res = (HeapWord*) Atomic::cmpxchg_ptr(end, &_finger, finger);
2469 if (res == finger && curr_region != NULL) {
2470 // we succeeded
2471 HeapWord* bottom = curr_region->bottom();
2472 HeapWord* limit = curr_region->next_top_at_mark_start();
2473
2474 // notice that _finger == end cannot be guaranteed here since,
2475 // someone else might have moved the finger even further
2476 assert(_finger >= end, "the finger should have moved forward");
2477
2478 if (limit > bottom) {
2479 return curr_region;
2480 } else {
2481 assert(limit == bottom,
2482 "the region limit should be at bottom");
2483 // we return NULL and the caller should try calling
2519 };
2520
2521 void ConcurrentMark::verify_no_cset_oops() {
2522 assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
2523 if (!G1CollectedHeap::heap()->collector_state()->mark_in_progress()) {
2524 return;
2525 }
2526
2527 // Verify entries on the global mark stack
2528 _markStack.iterate(VerifyNoCSetOops("Stack"));
2529
2530 // Verify entries on the task queues
2531 for (uint i = 0; i < _max_worker_id; ++i) {
2532 CMTaskQueue* queue = _task_queues->queue(i);
2533 queue->iterate(VerifyNoCSetOops("Queue", i));
2534 }
2535
2536 // Verify the global finger
2537 HeapWord* global_finger = finger();
2538 if (global_finger != NULL && global_finger < _heap_end) {
2539 // Since we always iterate over all regions, we might get a NULL HeapRegion
2540 // here.
2541 HeapRegion* global_hr = _g1h->heap_region_containing(global_finger);
2542 guarantee(global_hr == NULL || global_finger == global_hr->bottom(),
2543 "global finger: " PTR_FORMAT " region: " HR_FORMAT,
2544 p2i(global_finger), HR_FORMAT_PARAMS(global_hr));
2545 }
2546
2547 // Verify the task fingers
2548 assert(parallel_marking_threads() <= _max_worker_id, "sanity");
2549 for (uint i = 0; i < parallel_marking_threads(); ++i) {
2550 CMTask* task = _tasks[i];
2551 HeapWord* task_finger = task->finger();
2552 if (task_finger != NULL && task_finger < _heap_end) {
2553 // See above note on the global finger verification.
2554 HeapRegion* task_hr = _g1h->heap_region_containing(task_finger);
2555 guarantee(task_hr == NULL || task_finger == task_hr->bottom() ||
2556 !task_hr->in_collection_set(),
2557 "task finger: " PTR_FORMAT " region: " HR_FORMAT,
2558 p2i(task_finger), HR_FORMAT_PARAMS(task_hr));
2559 }
2560 }
2561 }
2562 #endif // PRODUCT
2563
2564 // Aggregate the counting data that was constructed concurrently
2565 // with marking.
2566 class AggregateCountDataHRClosure: public HeapRegionClosure {
2567 G1CollectedHeap* _g1h;
2568 ConcurrentMark* _cm;
2569 CardTableModRefBS* _ct_bs;
2570 BitMap* _cm_card_bm;
2571 uint _max_worker_id;
2572
2573 public:
  // cm_card_bm is the global card liveness bitmap the aggregation writes
  // into; max_worker_id bounds the per-worker data to aggregate.
  // NOTE(review): the aggregation itself happens in doHeapRegion (body
  // not fully visible here) — confirm against the full source.
  AggregateCountDataHRClosure(G1CollectedHeap* g1h,
                              BitMap* cm_card_bm,
                              uint max_worker_id) :
    _g1h(g1h), _cm(g1h->concurrent_mark()),
    _ct_bs(barrier_set_cast<CardTableModRefBS>(g1h->barrier_set())),
    _cm_card_bm(cm_card_bm), _max_worker_id(max_worker_id) { }
2580
2581 bool doHeapRegion(HeapRegion* hr) {
2582 HeapWord* start = hr->bottom();
2583 HeapWord* limit = hr->next_top_at_mark_start();
2584 HeapWord* end = hr->end();
2585
2586 assert(start <= limit && limit <= hr->top() && hr->top() <= hr->end(),
2587 "Preconditions not met - "
2588 "start: " PTR_FORMAT ", limit: " PTR_FORMAT ", "
2589 "top: " PTR_FORMAT ", end: " PTR_FORMAT,
2590 p2i(start), p2i(limit), p2i(hr->top()), p2i(hr->end()));
2591
2592 assert(hr->next_marked_bytes() == 0, "Precondition");
2593
2594 if (start == limit) {
2595 // NTAMS of this region has not been set so nothing to do.
2596 return false;
2597 }
2598
2599 // 'start' should be in the heap.
2600 assert(_g1h->is_in_g1_reserved(start) && _ct_bs->is_card_aligned(start), "sanity");
2601 // 'end' *may* be just beyond the end of the heap (if hr is the last region)
2869
2870 static ReferenceProcessor* get_cm_oop_closure_ref_processor(G1CollectedHeap* g1h) {
2871 ReferenceProcessor* result = NULL;
2872 if (G1UseConcMarkReferenceProcessing) {
2873 result = g1h->ref_processor_cm();
2874 assert(result != NULL, "should not be NULL");
2875 }
2876 return result;
2877 }
2878
// Constructs the marking oop closure. The reference processor handed to
// MetadataAwareOopClosure is NULL unless G1UseConcMarkReferenceProcessing
// is enabled (see get_cm_oop_closure_ref_processor()).
G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h,
                               ConcurrentMark* cm,
                               CMTask* task)
  : MetadataAwareOopClosure(get_cm_oop_closure_ref_processor(g1h)),
    _g1h(g1h), _cm(cm), _task(task)
{ }
2885
// Installs hr as this task's current region: points the local finger at
// the region's bottom, then recomputes the scan limit. _curr_region must
// be set before update_region_limit(), which reads it.
void CMTask::setup_for_region(HeapRegion* hr) {
  assert(hr != NULL,
         "claim_region() should have filtered out NULL regions");
  _curr_region = hr;
  _finger = hr->bottom();
  update_region_limit();
}
2893
2894 void CMTask::update_region_limit() {
2895 HeapRegion* hr = _curr_region;
2896 HeapWord* bottom = hr->bottom();
2897 HeapWord* limit = hr->next_top_at_mark_start();
2898
2899 if (limit == bottom) {
2900 // The region was collected underneath our feet.
2901 // We set the finger to bottom to ensure that the bitmap
2902 // iteration that will follow this will not do anything.
2903 // (this is not a condition that holds when we set the region up,
2904 // as the region is not supposed to be empty in the first place)
2905 _finger = bottom;
2906 } else if (limit >= _region_limit) {
2907 assert(limit >= _finger, "peace of mind");
2908 } else {
|