< prev index next >

src/share/vm/gc/g1/concurrentMark.cpp

Print this page




 806   if (cl.complete()) {
 807     clear_all_count_data();
 808   }
 809 
 810   // Repeat the asserts from above.
 811   guarantee(cmThread()->during_cycle(), "invariant");
 812   guarantee(!g1h->collector_state()->mark_in_progress(), "invariant");
 813 }
 814 
 815 class CheckBitmapClearHRClosure : public HeapRegionClosure {
 816   CMBitMap* _bitmap;
 817   bool _error;
 818  public:
 819   CheckBitmapClearHRClosure(CMBitMap* bitmap) : _bitmap(bitmap) {
 820   }
 821 
 822   virtual bool doHeapRegion(HeapRegion* r) {
 823     // This closure can be called concurrently to the mutator, so we must make sure
 824     // that the result of the getNextMarkedWordAddress() call is compared to the
 825     // value passed to it as limit to detect any found bits.
 826     // We can use the region's orig_end() for the limit and the comparison value
 827     // as it always contains the "real" end of the region that never changes and
 828     // has no side effects.
 829     // Due to the latter, there can also be no problem with the compiler generating
 830     // reloads of the orig_end() call.
 831     HeapWord* end = r->orig_end();
 832     return _bitmap->getNextMarkedWordAddress(r->bottom(), end) != end;
 833   }
 834 };
 835 
 836 bool ConcurrentMark::nextMarkBitmapIsClear() {
 837   CheckBitmapClearHRClosure cl(_nextMarkBitMap);
 838   _g1h->heap_region_iterate(&cl);
 839   return cl.complete();
 840 }
 841 
 842 class NoteStartOfMarkHRClosure: public HeapRegionClosure {
 843 public:
 844   bool doHeapRegion(HeapRegion* r) {
 845     if (!r->is_continues_humongous()) {
 846       r->note_start_of_marking();
 847     }
 848     return false;
 849   }
 850 };
 851 
 852 void ConcurrentMark::checkpointRootsInitialPre() {
 853   G1CollectedHeap*   g1h = G1CollectedHeap::heap();
 854   G1CollectorPolicy* g1p = g1h->g1_policy();
 855 
 856   _has_aborted = false;
 857 
 858   // Initialize marking structures. This has to be done in a STW phase.
 859   reset();
 860 
 861   // For each region note start of marking.
 862   NoteStartOfMarkHRClosure startcl;
 863   g1h->heap_region_iterate(&startcl);
 864 }
 865 
 866 
 867 void ConcurrentMark::checkpointRootsInitialPost() {


1315 
1316   g1p->record_concurrent_mark_remark_end();
1317 
1318   G1CMIsAliveClosure is_alive(g1h);
1319   g1h->gc_tracer_cm()->report_object_count_after_gc(&is_alive);
1320 }
1321 
1322 // Base class of the closures that finalize and verify the
1323 // liveness counting data.
// Base class of the closures that finalize and verify the
// liveness counting data.
class CMCountDataClosureBase: public HeapRegionClosure {
protected:
  G1CollectedHeap* _g1h;
  ConcurrentMark* _cm;
  CardTableModRefBS* _ct_bs;  // Card table, used for card-alignment checks.

  BitMap* _region_bm;         // Region liveness bitmap (indexed by hrm_index).
  BitMap* _card_bm;           // Card liveness bitmap.

  // Takes a region that's not empty (i.e., it has at least one
  // live object in it and sets its corresponding bit on the region
  // bitmap to 1. If the region is "starts humongous" it will also set
  // to 1 the bits on the region bitmap that correspond to its
  // associated "continues humongous" regions.
  void set_bit_for_region(HeapRegion* hr) {
    // Callers are expected to have filtered out continues-humongous
    // regions already.
    assert(!hr->is_continues_humongous(), "should have filtered those out");

    BitMap::idx_t index = (BitMap::idx_t) hr->hrm_index();
    if (!hr->is_starts_humongous()) {
      // Normal (non-humongous) case: just set the bit.
      _region_bm->par_at_put(index, true);
    } else {
      // Starts humongous case: calculate how many regions are part of
      // this humongous region and then set the bit range.
      // last_hc_index() presumably yields the index one past the last
      // continues-humongous region — confirm against HeapRegion.
      BitMap::idx_t end_index = (BitMap::idx_t) hr->last_hc_index();
      _region_bm->par_at_put_range(index, end_index, true);
    }
  }

public:
  CMCountDataClosureBase(G1CollectedHeap* g1h,
                         BitMap* region_bm, BitMap* card_bm):
    _g1h(g1h), _cm(g1h->concurrent_mark()),
    _ct_bs(barrier_set_cast<CardTableModRefBS>(g1h->barrier_set())),
    _region_bm(region_bm), _card_bm(card_bm) { }
};
1360 
1361 // Closure that calculates the # live objects per region. Used
1362 // for verification purposes during the cleanup pause.
1363 class CalcLiveObjectsClosure: public CMCountDataClosureBase {
1364   CMBitMapRO* _bm;
1365   size_t _region_marked_bytes;
1366 
1367 public:
1368   CalcLiveObjectsClosure(CMBitMapRO *bm, G1CollectedHeap* g1h,
1369                          BitMap* region_bm, BitMap* card_bm) :
1370     CMCountDataClosureBase(g1h, region_bm, card_bm),
1371     _bm(bm), _region_marked_bytes(0) { }
1372 
1373   bool doHeapRegion(HeapRegion* hr) {
1374 
1375     if (hr->is_continues_humongous()) {
1376       // We will ignore these here and process them when their
1377       // associated "starts humongous" region is processed (see
1378       // set_bit_for_heap_region()). Note that we cannot rely on their
1379       // associated "starts humongous" region to have their bit set to
1380       // 1 since, due to the region chunking in the parallel region
1381       // iteration, a "continues humongous" region might be visited
1382       // before its associated "starts humongous".
1383       return false;
1384     }
1385 
1386     HeapWord* ntams = hr->next_top_at_mark_start();
1387     HeapWord* start = hr->bottom();
1388 
1389     assert(start <= hr->end() && start <= ntams && ntams <= hr->end(),
1390            "Preconditions not met - "
1391            "start: " PTR_FORMAT ", ntams: " PTR_FORMAT ", end: " PTR_FORMAT,
1392            p2i(start), p2i(ntams), p2i(hr->end()));
1393 
1394     // Find the first marked object at or after "start".
1395     start = _bm->getNextMarkedWordAddress(start, ntams);
1396 
1397     size_t marked_bytes = 0;
1398 
1399     while (start < ntams) {
1400       oop obj = oop(start);
1401       int obj_sz = obj->size();
1402       HeapWord* obj_end = start + obj_sz;
1403 
1404       BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
1405       BitMap::idx_t end_idx = _cm->card_bitmap_index_for(obj_end);
1406 
1407       // Note: if we're looking at the last region in heap - obj_end
1408       // could be actually just beyond the end of the heap; end_idx
1409       // will then correspond to a (non-existent) card that is also
1410       // just beyond the heap.
1411       if (_g1h->is_in_g1_reserved(obj_end) && !_ct_bs->is_card_aligned(obj_end)) {
1412         // end of object is not card aligned - increment to cover
1413         // all the cards spanned by the object
1414         end_idx += 1;
1415       }
1416 
1417       // Set the bits in the card BM for the cards spanned by this object.
1418       _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);
1419 
1420       // Add the size of this object to the number of marked bytes.
1421       marked_bytes += (size_t)obj_sz * HeapWordSize;
1422 



1423       // Find the next marked object after this one.
1424       start = _bm->getNextMarkedWordAddress(obj_end, ntams);
1425     }
1426 
1427     // Mark the allocated-since-marking portion...
1428     HeapWord* top = hr->top();
1429     if (ntams < top) {
1430       BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams);
1431       BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top);
1432 
1433       // Note: if we're looking at the last region in heap - top
1434       // could be actually just beyond the end of the heap; end_idx
1435       // will then correspond to a (non-existent) card that is also
1436       // just beyond the heap.
1437       if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) {
1438         // end of object is not card aligned - increment to cover
1439         // all the cards spanned by the object
1440         end_idx += 1;
1441       }
1442       _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);


1477   BitMap* _exp_card_bm;   // Expected card BM values
1478 
1479   int _failures;
1480 
1481 public:
1482   VerifyLiveObjectDataHRClosure(G1CollectedHeap* g1h,
1483                                 BitMap* region_bm,
1484                                 BitMap* card_bm,
1485                                 BitMap* exp_region_bm,
1486                                 BitMap* exp_card_bm,
1487                                 bool verbose) :
1488     _g1h(g1h), _cm(g1h->concurrent_mark()),
1489     _calc_cl(_cm->nextMarkBitMap(), g1h, exp_region_bm, exp_card_bm),
1490     _region_bm(region_bm), _card_bm(card_bm), _verbose(verbose),
1491     _exp_region_bm(exp_region_bm), _exp_card_bm(exp_card_bm),
1492     _failures(0) { }
1493 
1494   int failures() const { return _failures; }
1495 
1496   bool doHeapRegion(HeapRegion* hr) {
1497     if (hr->is_continues_humongous()) {
1498       // We will ignore these here and process them when their
1499       // associated "starts humongous" region is processed (see
1500       // set_bit_for_heap_region()). Note that we cannot rely on their
1501       // associated "starts humongous" region to have their bit set to
1502       // 1 since, due to the region chunking in the parallel region
1503       // iteration, a "continues humongous" region might be visited
1504       // before its associated "starts humongous".
1505       return false;
1506     }
1507 
1508     int failures = 0;
1509 
1510     // Call the CalcLiveObjectsClosure to walk the marking bitmap for
1511     // this region and set the corresponding bits in the expected region
1512     // and card bitmaps.
1513     bool res = _calc_cl.doHeapRegion(hr);
1514     assert(res == false, "should be continuing");
1515 
1516     MutexLockerEx x((_verbose ? ParGCRareEvent_lock : NULL),
1517                     Mutex::_no_safepoint_check_flag);
1518 
1519     // Verify the marked bytes for this region.
1520     size_t exp_marked_bytes = _calc_cl.region_marked_bytes();
1521     size_t act_marked_bytes = hr->next_marked_bytes();
1522 
1523     // We're not OK if expected marked bytes > actual marked bytes. It means
1524     // we have missed accounting some objects during the actual marking.
1525     if (exp_marked_bytes > act_marked_bytes) {
1526       if (_verbose) {
1527         gclog_or_tty->print_cr("Region %u: marked bytes mismatch: "


1635   }
1636 
1637   int failures() const { return _failures; }
1638 };
1639 
1640 // Closure that finalizes the liveness counting data.
1641 // Used during the cleanup pause.
1642 // Sets the bits corresponding to the interval [NTAMS, top]
1643 // (which contains the implicitly live objects) in the
1644 // card liveness bitmap. Also sets the bit for each region,
1645 // containing live data, in the region liveness bitmap.
1646 
1647 class FinalCountDataUpdateClosure: public CMCountDataClosureBase {
1648  public:
1649   FinalCountDataUpdateClosure(G1CollectedHeap* g1h,
1650                               BitMap* region_bm,
1651                               BitMap* card_bm) :
1652     CMCountDataClosureBase(g1h, region_bm, card_bm) { }
1653 
1654   bool doHeapRegion(HeapRegion* hr) {
1655 
1656     if (hr->is_continues_humongous()) {
1657       // We will ignore these here and process them when their
1658       // associated "starts humongous" region is processed (see
1659       // set_bit_for_heap_region()). Note that we cannot rely on their
1660       // associated "starts humongous" region to have their bit set to
1661       // 1 since, due to the region chunking in the parallel region
1662       // iteration, a "continues humongous" region might be visited
1663       // before its associated "starts humongous".
1664       return false;
1665     }
1666 
1667     HeapWord* ntams = hr->next_top_at_mark_start();
1668     HeapWord* top   = hr->top();
1669 
1670     assert(hr->bottom() <= ntams && ntams <= hr->end(), "Preconditions.");
1671 
1672     // Mark the allocated-since-marking portion...
1673     if (ntams < top) {
1674       // This definitely means the region has live objects.
1675       set_bit_for_region(hr);
1676 
1677       // Now set the bits in the card bitmap for [ntams, top)
1678       BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams);
1679       BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top);
1680 
1681       // Note: if we're looking at the last region in heap - top
1682       // could be actually just beyond the end of the heap; end_idx
1683       // will then correspond to a (non-existent) card that is also
1684       // just beyond the heap.
1685       if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) {
1686         // end of object is not card aligned - increment to cover


1743   HeapRegionSetCount _old_regions_removed;
1744   HeapRegionSetCount _humongous_regions_removed;
1745   HRRSCleanupTask* _hrrs_cleanup_task;
1746 
1747 public:
1748   G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1,
1749                              FreeRegionList* local_cleanup_list,
1750                              HRRSCleanupTask* hrrs_cleanup_task) :
1751     _g1(g1),
1752     _freed_bytes(0),
1753     _local_cleanup_list(local_cleanup_list),
1754     _old_regions_removed(),
1755     _humongous_regions_removed(),
1756     _hrrs_cleanup_task(hrrs_cleanup_task) { }
1757 
1758   size_t freed_bytes() { return _freed_bytes; }
1759   const HeapRegionSetCount& old_regions_removed() { return _old_regions_removed; }
1760   const HeapRegionSetCount& humongous_regions_removed() { return _humongous_regions_removed; }
1761 
  // Per-region cleanup after concurrent marking: frees regions found to
  // contain no live data, otherwise queues their remembered sets for
  // cleanup. Always returns false so the heap iteration continues.
  bool doHeapRegion(HeapRegion *hr) {
    if (hr->is_continues_humongous() || hr->is_archive()) {
      // Continues-humongous regions are handled together with their
      // starts-humongous region; archive regions are skipped entirely.
      return false;
    }
    // NOTE(review): a previous comment here claimed regions "were claimed
    // with value 1 in the FinalCount task"; no claim value appears in this
    // code — confirm whether that remark was stale.
    _g1->reset_gc_time_stamps(hr);
    hr->note_end_of_marking();

    // A non-young region with allocated space but zero live bytes is
    // completely dead: reclaim it onto the local cleanup list.
    if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young()) {
      _freed_bytes += hr->used();
      hr->set_containing_set(NULL);
      if (hr->is_humongous()) {
        assert(hr->is_starts_humongous(), "we should only see starts humongous");
        _humongous_regions_removed.increment(1u, hr->capacity());
        _g1->free_humongous_region(hr, _local_cleanup_list, true);
      } else {
        _old_regions_removed.increment(1u, hr->capacity());
        _g1->free_region(hr, _local_cleanup_list, true);
      }
    } else {
      // Region stays: schedule its remembered set for cleanup work.
      hr->rem_set()->do_cleanup_work(_hrrs_cleanup_task);
    }

    return false;
  }
1788 };
1789 
1790 class G1ParNoteEndTask: public AbstractGangTask {
1791   friend class G1NoteEndOfConcMarkClosure;
1792 
1793 protected:
1794   G1CollectedHeap* _g1h;
1795   FreeRegionList* _cleanup_list;


2419   }
2420 }
2421 
2422 void ConcurrentMark::swapMarkBitMaps() {
2423   CMBitMapRO* temp = _prevMarkBitMap;
2424   _prevMarkBitMap  = (CMBitMapRO*)_nextMarkBitMap;
2425   _nextMarkBitMap  = (CMBitMap*)  temp;
2426 }
2427 
2428 // Closure for marking entries in SATB buffers.
// Closure for marking entries in SATB buffers.
class CMSATBBufferClosure : public SATBBufferClosure {
private:
  CMTask* _task;          // Marking task that receives the greyed references.
  G1CollectedHeap* _g1h;  // Used to map raw entries to their heap regions.

  // This is very similar to CMTask::deal_with_reference, but with
  // more relaxed requirements for the argument, so this must be more
  // circumspect about treating the argument as an object.
  void do_entry(void* entry) const {
    _task->increment_refs_reached();
    HeapRegion* hr = _g1h->heap_region_containing_raw(entry);
    // Only entries below the region's NTAMS are treated as objects;
    // anything at or above it may be a stale reference.
    if (entry < hr->next_top_at_mark_start()) {
      // Until we get here, we don't know whether entry refers to a valid
      // object; it could instead have been a stale reference.
      oop obj = static_cast<oop>(entry);
      assert(obj->is_oop(true /* ignore mark word */),
             "Invalid oop in SATB buffer: " PTR_FORMAT, p2i(obj));
      _task->make_reference_grey(obj, hr);
    }
  }

public:
  CMSATBBufferClosure(CMTask* task, G1CollectedHeap* g1h)
    : _task(task), _g1h(g1h) { }

  // Process every entry in the given SATB buffer.
  virtual void do_buffer(void** buffer, size_t size) {
    for (size_t i = 0; i < size; ++i) {
      do_entry(buffer[i]);
    }
  }
};


2592     // last "continues humongous" (CH) region in the sequence, or the
2593     // standard end of the SH region (if the SH is the only region in
2594     // the sequence). That way claim_region() will skip over the CH
2595     // regions. However, there is a subtle race between a CM thread
2596     // executing this method and a mutator thread doing a humongous
2597     // object allocation. The two are not mutually exclusive as the CM
2598     // thread does not need to hold the Heap_lock when it gets
2599     // here. So there is a chance that claim_region() will come across
2600     // a free region that's in the progress of becoming a SH or a CH
2601     // region. In the former case, it will either
2602     //   a) Miss the update to the region's end, in which case it will
2603     //      visit every subsequent CH region, will find their bitmaps
2604     //      empty, and do nothing, or
2605     //   b) Will observe the update of the region's end (in which case
2606     //      it will skip the subsequent CH regions).
2607     // If it comes across a region that suddenly becomes CH, the
2608     // scenario will be similar to b). So, the race between
2609     // claim_region() and a humongous object allocation might force us
2610     // to do a bit of unnecessary work (due to some unnecessary bitmap
2611     // iterations) but it should not introduce and correctness issues.
2612     HeapRegion* curr_region = _g1h->heap_region_containing_raw(finger);
2613 
2614     // Above heap_region_containing_raw may return NULL as we always scan claim
2615     // until the end of the heap. In this case, just jump to the next region.
2616     HeapWord* end = curr_region != NULL ? curr_region->end() : finger + HeapRegion::GrainWords;
2617 
2618     // Is the gap between reading the finger and doing the CAS too long?
2619     HeapWord* res = (HeapWord*) Atomic::cmpxchg_ptr(end, &_finger, finger);
2620     if (res == finger && curr_region != NULL) {
2621       // we succeeded
2622       HeapWord*   bottom        = curr_region->bottom();
2623       HeapWord*   limit         = curr_region->next_top_at_mark_start();
2624 
2625       if (verbose_low()) {
2626         gclog_or_tty->print_cr("[%u] curr_region = " PTR_FORMAT " "
2627                                "[" PTR_FORMAT ", " PTR_FORMAT "), "
2628                                "limit = " PTR_FORMAT,
2629                                worker_id, p2i(curr_region), p2i(bottom), p2i(end), p2i(limit));
2630       }
2631 
2632       // notice that _finger == end cannot be guaranteed here since,
2633       // someone else might have moved the finger even further
2634       assert(_finger >= end, "the finger should have moved forward");


2705 
2706 void ConcurrentMark::verify_no_cset_oops() {
2707   assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
2708   if (!G1CollectedHeap::heap()->collector_state()->mark_in_progress()) {
2709     return;
2710   }
2711 
2712   // Verify entries on the global mark stack
2713   _markStack.iterate(VerifyNoCSetOops("Stack"));
2714 
2715   // Verify entries on the task queues
2716   for (uint i = 0; i < _max_worker_id; ++i) {
2717     CMTaskQueue* queue = _task_queues->queue(i);
2718     queue->iterate(VerifyNoCSetOops("Queue", i));
2719   }
2720 
2721   // Verify the global finger
2722   HeapWord* global_finger = finger();
2723   if (global_finger != NULL && global_finger < _heap_end) {
2724     // The global finger always points to a heap region boundary. We
2725     // use heap_region_containing_raw() to get the containing region
2726     // given that the global finger could be pointing to a free region
2727     // which subsequently becomes continues humongous. If that
2728     // happens, heap_region_containing() will return the bottom of the
2729     // corresponding starts humongous region and the check below will
2730     // not hold any more.
2731     // Since we always iterate over all regions, we might get a NULL HeapRegion
2732     // here.
2733     HeapRegion* global_hr = _g1h->heap_region_containing_raw(global_finger);
2734     guarantee(global_hr == NULL || global_finger == global_hr->bottom(),
2735               "global finger: " PTR_FORMAT " region: " HR_FORMAT,
2736               p2i(global_finger), HR_FORMAT_PARAMS(global_hr));
2737   }
2738 
2739   // Verify the task fingers
2740   assert(parallel_marking_threads() <= _max_worker_id, "sanity");
2741   for (uint i = 0; i < parallel_marking_threads(); ++i) {
2742     CMTask* task = _tasks[i];
2743     HeapWord* task_finger = task->finger();
2744     if (task_finger != NULL && task_finger < _heap_end) {
2745       // See above note on the global finger verification.
2746       HeapRegion* task_hr = _g1h->heap_region_containing_raw(task_finger);
2747       guarantee(task_hr == NULL || task_finger == task_hr->bottom() ||
2748                 !task_hr->in_collection_set(),
2749                 "task finger: " PTR_FORMAT " region: " HR_FORMAT,
2750                 p2i(task_finger), HR_FORMAT_PARAMS(task_hr));
2751     }
2752   }
2753 }
2754 #endif // PRODUCT
2755 
2756 // Aggregate the counting data that was constructed concurrently
2757 // with marking.
2758 class AggregateCountDataHRClosure: public HeapRegionClosure {
2759   G1CollectedHeap* _g1h;
2760   ConcurrentMark* _cm;
2761   CardTableModRefBS* _ct_bs;
2762   BitMap* _cm_card_bm;
2763   uint _max_worker_id;
2764 
2765  public:
  // g1h           - the heap; also supplies the concurrent-mark instance
  //                 and the card table stored in the members.
  // cm_card_bm    - destination card bitmap for the aggregated data.
  // max_worker_id - number of workers whose data is aggregated
  //                 (presumably bounds the per-worker sources — confirm
  //                 against the rest of this class).
  AggregateCountDataHRClosure(G1CollectedHeap* g1h,
                              BitMap* cm_card_bm,
                              uint max_worker_id) :
    _g1h(g1h), _cm(g1h->concurrent_mark()),
    _ct_bs(barrier_set_cast<CardTableModRefBS>(g1h->barrier_set())),
    _cm_card_bm(cm_card_bm), _max_worker_id(max_worker_id) { }
2772 
2773   bool doHeapRegion(HeapRegion* hr) {
2774     if (hr->is_continues_humongous()) {
2775       // We will ignore these here and process them when their
2776       // associated "starts humongous" region is processed.
2777       // Note that we cannot rely on their associated
2778       // "starts humongous" region to have their bit set to 1
2779       // since, due to the region chunking in the parallel region
2780       // iteration, a "continues humongous" region might be visited
2781       // before its associated "starts humongous".
2782       return false;
2783     }
2784 
2785     HeapWord* start = hr->bottom();
2786     HeapWord* limit = hr->next_top_at_mark_start();
2787     HeapWord* end = hr->end();
2788 
2789     assert(start <= limit && limit <= hr->top() && hr->top() <= hr->end(),
2790            "Preconditions not met - "
2791            "start: " PTR_FORMAT ", limit: " PTR_FORMAT ", "
2792            "top: " PTR_FORMAT ", end: " PTR_FORMAT,
2793            p2i(start), p2i(limit), p2i(hr->top()), p2i(hr->end()));
2794 
2795     assert(hr->next_marked_bytes() == 0, "Precondition");
2796 
2797     if (start == limit) {
2798       // NTAMS of this region has not been set so nothing to do.
2799       return false;
2800     }
2801 
2802     // 'start' should be in the heap.
2803     assert(_g1h->is_in_g1_reserved(start) && _ct_bs->is_card_aligned(start), "sanity");
2804     // 'end' *may* be just beyond the end of the heap (if hr is the last region)


3084 
3085 static ReferenceProcessor* get_cm_oop_closure_ref_processor(G1CollectedHeap* g1h) {
3086   ReferenceProcessor* result = NULL;
3087   if (G1UseConcMarkReferenceProcessing) {
3088     result = g1h->ref_processor_cm();
3089     assert(result != NULL, "should not be NULL");
3090   }
3091   return result;
3092 }
3093 
// The base class receives the concurrent-mark reference processor only
// when G1UseConcMarkReferenceProcessing is enabled (see
// get_cm_oop_closure_ref_processor()); otherwise it gets NULL.
G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h,
                               ConcurrentMark* cm,
                               CMTask* task)
  : MetadataAwareOopClosure(get_cm_oop_closure_ref_processor(g1h)),
    _g1h(g1h), _cm(cm), _task(task)
{ }
3100 
// Installs hr as this task's current scan region: positions the local
// finger at the region's bottom and derives the scan limit for it.
void CMTask::setup_for_region(HeapRegion* hr) {
  assert(hr != NULL,
        "claim_region() should have filtered out NULL regions");
  assert(!hr->is_continues_humongous(),
        "claim_region() should have filtered out continues humongous regions");

  if (_cm->verbose_low()) {
    gclog_or_tty->print_cr("[%u] setting up for region " PTR_FORMAT,
                           _worker_id, p2i(hr));
  }

  _curr_region  = hr;
  _finger       = hr->bottom();
  // Computes the region-local limit from the just-set _curr_region.
  update_region_limit();
}
3116 
3117 void CMTask::update_region_limit() {
3118   HeapRegion* hr            = _curr_region;
3119   HeapWord* bottom          = hr->bottom();
3120   HeapWord* limit           = hr->next_top_at_mark_start();
3121 
3122   if (limit == bottom) {
3123     if (_cm->verbose_low()) {
3124       gclog_or_tty->print_cr("[%u] found an empty region "
3125                              "[" PTR_FORMAT ", " PTR_FORMAT ")",




 806   if (cl.complete()) {
 807     clear_all_count_data();
 808   }
 809 
 810   // Repeat the asserts from above.
 811   guarantee(cmThread()->during_cycle(), "invariant");
 812   guarantee(!g1h->collector_state()->mark_in_progress(), "invariant");
 813 }
 814 
 815 class CheckBitmapClearHRClosure : public HeapRegionClosure {
 816   CMBitMap* _bitmap;
 817   bool _error;
 818  public:
 819   CheckBitmapClearHRClosure(CMBitMap* bitmap) : _bitmap(bitmap) {
 820   }
 821 
 822   virtual bool doHeapRegion(HeapRegion* r) {
 823     // This closure can be called concurrently to the mutator, so we must make sure
 824     // that the result of the getNextMarkedWordAddress() call is compared to the
 825     // value passed to it as limit to detect any found bits.
 826     HeapWord* end = r->end();





 827     return _bitmap->getNextMarkedWordAddress(r->bottom(), end) != end;
 828   }
 829 };
 830 
 831 bool ConcurrentMark::nextMarkBitmapIsClear() {
 832   CheckBitmapClearHRClosure cl(_nextMarkBitMap);
 833   _g1h->heap_region_iterate(&cl);
 834   return cl.complete();
 835 }
 836 
// Records the start of marking in every heap region; applied via
// heap_region_iterate() when a marking cycle begins.
class NoteStartOfMarkHRClosure: public HeapRegionClosure {
public:
  bool doHeapRegion(HeapRegion* r) {

    r->note_start_of_marking();

    // Returning false keeps the iteration going over all regions.
    return false;
  }
};
 844 
 845 void ConcurrentMark::checkpointRootsInitialPre() {
 846   G1CollectedHeap*   g1h = G1CollectedHeap::heap();
 847   G1CollectorPolicy* g1p = g1h->g1_policy();
 848 
 849   _has_aborted = false;
 850 
 851   // Initialize marking structures. This has to be done in a STW phase.
 852   reset();
 853 
 854   // For each region note start of marking.
 855   NoteStartOfMarkHRClosure startcl;
 856   g1h->heap_region_iterate(&startcl);
 857 }
 858 
 859 
 860 void ConcurrentMark::checkpointRootsInitialPost() {


1308 
1309   g1p->record_concurrent_mark_remark_end();
1310 
1311   G1CMIsAliveClosure is_alive(g1h);
1312   g1h->gc_tracer_cm()->report_object_count_after_gc(&is_alive);
1313 }
1314 
1315 // Base class of the closures that finalize and verify the
1316 // liveness counting data.
1317 class CMCountDataClosureBase: public HeapRegionClosure {
1318 protected:
1319   G1CollectedHeap* _g1h;
1320   ConcurrentMark* _cm;
1321   CardTableModRefBS* _ct_bs;
1322 
1323   BitMap* _region_bm;
1324   BitMap* _card_bm;
1325 
1326   // Takes a region that's not empty (i.e., it has at least one
1327   // live object in it and sets its corresponding bit on the region
1328   // bitmap to 1.


1329   void set_bit_for_region(HeapRegion* hr) {


1330     BitMap::idx_t index = (BitMap::idx_t) hr->hrm_index();


1331     _region_bm->par_at_put(index, true);






1332   }
1333 
1334 public:
1335   CMCountDataClosureBase(G1CollectedHeap* g1h,
1336                          BitMap* region_bm, BitMap* card_bm):
1337     _g1h(g1h), _cm(g1h->concurrent_mark()),
1338     _ct_bs(barrier_set_cast<CardTableModRefBS>(g1h->barrier_set())),
1339     _region_bm(region_bm), _card_bm(card_bm) { }
1340 };
1341 
1342 // Closure that calculates the # live objects per region. Used
1343 // for verification purposes during the cleanup pause.
1344 class CalcLiveObjectsClosure: public CMCountDataClosureBase {
1345   CMBitMapRO* _bm;
1346   size_t _region_marked_bytes;
1347 
1348 public:
1349   CalcLiveObjectsClosure(CMBitMapRO *bm, G1CollectedHeap* g1h,
1350                          BitMap* region_bm, BitMap* card_bm) :
1351     CMCountDataClosureBase(g1h, region_bm, card_bm),
1352     _bm(bm), _region_marked_bytes(0) { }
1353 
1354   bool doHeapRegion(HeapRegion* hr) {












1355     HeapWord* ntams = hr->next_top_at_mark_start();
1356     HeapWord* start = hr->bottom();
1357 
1358     assert(start <= hr->end() && start <= ntams && ntams <= hr->end(),
1359            "Preconditions not met - "
1360            "start: " PTR_FORMAT ", ntams: " PTR_FORMAT ", end: " PTR_FORMAT,
1361            p2i(start), p2i(ntams), p2i(hr->end()));
1362 
1363     // Find the first marked object at or after "start".
1364     start = _bm->getNextMarkedWordAddress(start, ntams);
1365 
1366     size_t marked_bytes = 0;
1367 
1368     while (start < ntams) {
1369       oop obj = oop(start);
1370       int obj_sz = obj->size();
1371       HeapWord* obj_end = start + obj_sz;
1372 
1373       BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
1374       BitMap::idx_t end_idx = _cm->card_bitmap_index_for(obj_end);
1375 
1376       // Note: if we're looking at the last region in heap - obj_end
1377       // could be actually just beyond the end of the heap; end_idx
1378       // will then correspond to a (non-existent) card that is also
1379       // just beyond the heap.
1380       if (_g1h->is_in_g1_reserved(obj_end) && !_ct_bs->is_card_aligned(obj_end)) {
1381         // end of object is not card aligned - increment to cover
1382         // all the cards spanned by the object
1383         end_idx += 1;
1384       }
1385 
1386       // Set the bits in the card BM for the cards spanned by this object.
1387       _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);
1388 
1389       // Add the size of this object to the number of marked bytes.
1390       marked_bytes += (size_t)obj_sz * HeapWordSize;
1391 
1392       if (obj_end > hr->end()) {
1393         break;
1394       }
1395       // Find the next marked object after this one.
1396       start = _bm->getNextMarkedWordAddress(obj_end, ntams);
1397     }
1398 
1399     // Mark the allocated-since-marking portion...
1400     HeapWord* top = hr->top();
1401     if (ntams < top) {
1402       BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams);
1403       BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top);
1404 
1405       // Note: if we're looking at the last region in heap - top
1406       // could be actually just beyond the end of the heap; end_idx
1407       // will then correspond to a (non-existent) card that is also
1408       // just beyond the heap.
1409       if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) {
1410         // end of object is not card aligned - increment to cover
1411         // all the cards spanned by the object
1412         end_idx += 1;
1413       }
1414       _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);


1449   BitMap* _exp_card_bm;   // Expected card BM values
1450 
1451   int _failures;
1452 
1453 public:
  // Builds the verification state: _calc_cl recomputes liveness from the
  // next marking bitmap into the expected bitmaps (exp_region_bm/exp_card_bm),
  // which doHeapRegion() then compares against the actual values
  // (region_bm/card_bm) gathered during marking.
  // verbose controls diagnostic logging on mismatch.
  VerifyLiveObjectDataHRClosure(G1CollectedHeap* g1h,
                                BitMap* region_bm,
                                BitMap* card_bm,
                                BitMap* exp_region_bm,
                                BitMap* exp_card_bm,
                                bool verbose) :
    _g1h(g1h), _cm(g1h->concurrent_mark()),
    _calc_cl(_cm->nextMarkBitMap(), g1h, exp_region_bm, exp_card_bm),
    _region_bm(region_bm), _card_bm(card_bm), _verbose(verbose),
    _exp_region_bm(exp_region_bm), _exp_card_bm(exp_card_bm),
    _failures(0) { }

  // Number of verification mismatches recorded so far.
  int failures() const { return _failures; }
1467 
1468   bool doHeapRegion(HeapRegion* hr) {











1469     int failures = 0;
1470 
1471     // Call the CalcLiveObjectsClosure to walk the marking bitmap for
1472     // this region and set the corresponding bits in the expected region
1473     // and card bitmaps.
1474     bool res = _calc_cl.doHeapRegion(hr);
1475     assert(res == false, "should be continuing");
1476 
1477     MutexLockerEx x((_verbose ? ParGCRareEvent_lock : NULL),
1478                     Mutex::_no_safepoint_check_flag);
1479 
1480     // Verify the marked bytes for this region.
1481     size_t exp_marked_bytes = _calc_cl.region_marked_bytes();
1482     size_t act_marked_bytes = hr->next_marked_bytes();
1483 
1484     // We're not OK if expected marked bytes > actual marked bytes. It means
1485     // we have missed accounting some objects during the actual marking.
1486     if (exp_marked_bytes > act_marked_bytes) {
1487       if (_verbose) {
1488         gclog_or_tty->print_cr("Region %u: marked bytes mismatch: "


1596   }
1597 
1598   int failures() const { return _failures; }
1599 };
1600 
1601 // Closure that finalizes the liveness counting data.
1602 // Used during the cleanup pause.
// Sets the bits corresponding to the interval [NTAMS, top)
// (which contains the implicitly live objects) in the
// card liveness bitmap. Also sets the bit for each region,
// containing live data, in the region liveness bitmap.
1607 
1608 class FinalCountDataUpdateClosure: public CMCountDataClosureBase {
1609  public:
  // All counting state (region/card bitmaps, heap, CM) is held by
  // CMCountDataClosureBase; this subclass only contributes the
  // finalizing doHeapRegion() logic used during the cleanup pause.
  FinalCountDataUpdateClosure(G1CollectedHeap* g1h,
                              BitMap* region_bm,
                              BitMap* card_bm) :
    CMCountDataClosureBase(g1h, region_bm, card_bm) { }
1614 
1615   bool doHeapRegion(HeapRegion* hr) {












1616     HeapWord* ntams = hr->next_top_at_mark_start();
1617     HeapWord* top   = hr->top();
1618 
1619     assert(hr->bottom() <= ntams && ntams <= hr->end(), "Preconditions.");
1620 
1621     // Mark the allocated-since-marking portion...
1622     if (ntams < top) {
1623       // This definitely means the region has live objects.
1624       set_bit_for_region(hr);
1625 
1626       // Now set the bits in the card bitmap for [ntams, top)
1627       BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams);
1628       BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top);
1629 
1630       // Note: if we're looking at the last region in heap - top
1631       // could be actually just beyond the end of the heap; end_idx
1632       // will then correspond to a (non-existent) card that is also
1633       // just beyond the heap.
1634       if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) {
1635         // end of object is not card aligned - increment to cover


1692   HeapRegionSetCount _old_regions_removed;
1693   HeapRegionSetCount _humongous_regions_removed;
1694   HRRSCleanupTask* _hrrs_cleanup_task;
1695 
1696 public:
  // Per-worker closure state: freed regions are collected into
  // local_cleanup_list and remembered-set cleanup work is deferred
  // into hrrs_cleanup_task; counters start at zero.
  G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1,
                             FreeRegionList* local_cleanup_list,
                             HRRSCleanupTask* hrrs_cleanup_task) :
    _g1(g1),
    _freed_bytes(0),
    _local_cleanup_list(local_cleanup_list),
    _old_regions_removed(),
    _humongous_regions_removed(),
    _hrrs_cleanup_task(hrrs_cleanup_task) { }

  // Total bytes reclaimed by regions freed in doHeapRegion().
  size_t freed_bytes() { return _freed_bytes; }
  // Count/capacity of old regions freed by this closure.
  const HeapRegionSetCount& old_regions_removed() { return _old_regions_removed; }
  // Count/capacity of humongous regions freed by this closure.
  const HeapRegionSetCount& humongous_regions_removed() { return _humongous_regions_removed; }
1710 
1711   bool doHeapRegion(HeapRegion *hr) {
1712     if (hr->is_archive()) {
1713       return false;
1714     }
1715     // We use a claim value of zero here because all regions
1716     // were claimed with value 1 in the FinalCount task.
1717     _g1->reset_gc_time_stamps(hr);
1718     hr->note_end_of_marking();
1719 
1720     if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young()) {
1721       _freed_bytes += hr->used();
1722       hr->set_containing_set(NULL);
1723       if (hr->is_humongous()) {

1724         _humongous_regions_removed.increment(1u, hr->capacity());
1725         _g1->free_humongous_region(hr, _local_cleanup_list, true);
1726       } else {
1727         _old_regions_removed.increment(1u, hr->capacity());
1728         _g1->free_region(hr, _local_cleanup_list, true);
1729       }
1730     } else {
1731       hr->rem_set()->do_cleanup_work(_hrrs_cleanup_task);
1732     }
1733 
1734     return false;
1735   }
1736 };
1737 
1738 class G1ParNoteEndTask: public AbstractGangTask {
1739   friend class G1NoteEndOfConcMarkClosure;
1740 
1741 protected:
1742   G1CollectedHeap* _g1h;
1743   FreeRegionList* _cleanup_list;


2367   }
2368 }
2369 
2370 void ConcurrentMark::swapMarkBitMaps() {
2371   CMBitMapRO* temp = _prevMarkBitMap;
2372   _prevMarkBitMap  = (CMBitMapRO*)_nextMarkBitMap;
2373   _nextMarkBitMap  = (CMBitMap*)  temp;
2374 }
2375 
2376 // Closure for marking entries in SATB buffers.
2377 class CMSATBBufferClosure : public SATBBufferClosure {
2378 private:
2379   CMTask* _task;
2380   G1CollectedHeap* _g1h;
2381 
2382   // This is very similar to CMTask::deal_with_reference, but with
2383   // more relaxed requirements for the argument, so this must be more
2384   // circumspect about treating the argument as an object.
2385   void do_entry(void* entry) const {
2386     _task->increment_refs_reached();
2387     HeapRegion* hr = _g1h->heap_region_containing(entry);
2388     if (entry < hr->next_top_at_mark_start()) {
2389       // Until we get here, we don't know whether entry refers to a valid
2390       // object; it could instead have been a stale reference.
2391       oop obj = static_cast<oop>(entry);
2392       assert(obj->is_oop(true /* ignore mark word */),
2393              "Invalid oop in SATB buffer: " PTR_FORMAT, p2i(obj));
2394       _task->make_reference_grey(obj, hr);
2395     }
2396   }
2397 
2398 public:
2399   CMSATBBufferClosure(CMTask* task, G1CollectedHeap* g1h)
2400     : _task(task), _g1h(g1h) { }
2401 
2402   virtual void do_buffer(void** buffer, size_t size) {
2403     for (size_t i = 0; i < size; ++i) {
2404       do_entry(buffer[i]);
2405     }
2406   }
2407 };


2540     // last "continues humongous" (CH) region in the sequence, or the
2541     // standard end of the SH region (if the SH is the only region in
2542     // the sequence). That way claim_region() will skip over the CH
2543     // regions. However, there is a subtle race between a CM thread
2544     // executing this method and a mutator thread doing a humongous
2545     // object allocation. The two are not mutually exclusive as the CM
2546     // thread does not need to hold the Heap_lock when it gets
2547     // here. So there is a chance that claim_region() will come across
2548     // a free region that's in the progress of becoming a SH or a CH
2549     // region. In the former case, it will either
2550     //   a) Miss the update to the region's end, in which case it will
2551     //      visit every subsequent CH region, will find their bitmaps
2552     //      empty, and do nothing, or
2553     //   b) Will observe the update of the region's end (in which case
2554     //      it will skip the subsequent CH regions).
2555     // If it comes across a region that suddenly becomes CH, the
2556     // scenario will be similar to b). So, the race between
2557     // claim_region() and a humongous object allocation might force us
2558     // to do a bit of unnecessary work (due to some unnecessary bitmap
    // iterations) but it should not introduce any correctness issues.
2560     HeapRegion* curr_region = _g1h->heap_region_containing(finger);
2561 
2562     // Above heap_region_containing may return NULL as we always scan claim
2563     // until the end of the heap. In this case, just jump to the next region.
2564     HeapWord* end = curr_region != NULL ? curr_region->end() : finger + HeapRegion::GrainWords;
2565 
2566     // Is the gap between reading the finger and doing the CAS too long?
2567     HeapWord* res = (HeapWord*) Atomic::cmpxchg_ptr(end, &_finger, finger);
2568     if (res == finger && curr_region != NULL) {
2569       // we succeeded
2570       HeapWord*   bottom        = curr_region->bottom();
2571       HeapWord*   limit         = curr_region->next_top_at_mark_start();
2572 
2573       if (verbose_low()) {
2574         gclog_or_tty->print_cr("[%u] curr_region = " PTR_FORMAT " "
2575                                "[" PTR_FORMAT ", " PTR_FORMAT "), "
2576                                "limit = " PTR_FORMAT,
2577                                worker_id, p2i(curr_region), p2i(bottom), p2i(end), p2i(limit));
2578       }
2579 
2580       // notice that _finger == end cannot be guaranteed here since,
2581       // someone else might have moved the finger even further
2582       assert(_finger >= end, "the finger should have moved forward");


2653 
// Verify that none of the marking data structures — the global mark
// stack, the per-worker task queues, the global finger and the per-task
// fingers — refer into the collection set. Must be called at a
// safepoint; does nothing unless a marking cycle is in progress.
void ConcurrentMark::verify_no_cset_oops() {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
  // No marking in progress: the marking structures carry no meaningful
  // entries to check.
  if (!G1CollectedHeap::heap()->collector_state()->mark_in_progress()) {
    return;
  }

  // Verify entries on the global mark stack
  _markStack.iterate(VerifyNoCSetOops("Stack"));

  // Verify entries on the task queues
  for (uint i = 0; i < _max_worker_id; ++i) {
    CMTaskQueue* queue = _task_queues->queue(i);
    queue->iterate(VerifyNoCSetOops("Queue", i));
  }

  // Verify the global finger
  HeapWord* global_finger = finger();
  if (global_finger != NULL && global_finger < _heap_end) {
    // The global finger always points to a heap region boundary. We
    // use heap_region_containing() to get the containing region
    // given that the global finger could be pointing to a free region
    // which subsequently becomes continues humongous. If that
    // happens, heap_region_containing() will return the bottom of the
    // corresponding starts humongous region and the check below will
    // not hold any more.
    // Since we always iterate over all regions, we might get a NULL HeapRegion
    // here.
    HeapRegion* global_hr = _g1h->heap_region_containing(global_finger);
    guarantee(global_hr == NULL || global_finger == global_hr->bottom(),
              "global finger: " PTR_FORMAT " region: " HR_FORMAT,
              p2i(global_finger), HR_FORMAT_PARAMS(global_hr));
  }

  // Verify the task fingers
  assert(parallel_marking_threads() <= _max_worker_id, "sanity");
  for (uint i = 0; i < parallel_marking_threads(); ++i) {
    CMTask* task = _tasks[i];
    HeapWord* task_finger = task->finger();
    if (task_finger != NULL && task_finger < _heap_end) {
      // See above note on the global finger verification.
      // A task finger is acceptable if it sits on a region boundary or
      // its region is not in the collection set.
      HeapRegion* task_hr = _g1h->heap_region_containing(task_finger);
      guarantee(task_hr == NULL || task_finger == task_hr->bottom() ||
                !task_hr->in_collection_set(),
                "task finger: " PTR_FORMAT " region: " HR_FORMAT,
                p2i(task_finger), HR_FORMAT_PARAMS(task_hr));
    }
  }
}
2702 #endif // PRODUCT
2703 
2704 // Aggregate the counting data that was constructed concurrently
2705 // with marking.
2706 class AggregateCountDataHRClosure: public HeapRegionClosure {
2707   G1CollectedHeap* _g1h;
2708   ConcurrentMark* _cm;
2709   CardTableModRefBS* _ct_bs;
2710   BitMap* _cm_card_bm;
2711   uint _max_worker_id;
2712 
2713  public:
  // cm_card_bm is the global card bitmap into which the per-worker
  // counting data is aggregated; max_worker_id bounds the worker ids
  // whose data is combined. The card table is cached for card-alignment
  // checks in doHeapRegion().
  AggregateCountDataHRClosure(G1CollectedHeap* g1h,
                              BitMap* cm_card_bm,
                              uint max_worker_id) :
    _g1h(g1h), _cm(g1h->concurrent_mark()),
    _ct_bs(barrier_set_cast<CardTableModRefBS>(g1h->barrier_set())),
    _cm_card_bm(cm_card_bm), _max_worker_id(max_worker_id) { }
2720 
2721   bool doHeapRegion(HeapRegion* hr) {











2722     HeapWord* start = hr->bottom();
2723     HeapWord* limit = hr->next_top_at_mark_start();
2724     HeapWord* end = hr->end();
2725 
2726     assert(start <= limit && limit <= hr->top() && hr->top() <= hr->end(),
2727            "Preconditions not met - "
2728            "start: " PTR_FORMAT ", limit: " PTR_FORMAT ", "
2729            "top: " PTR_FORMAT ", end: " PTR_FORMAT,
2730            p2i(start), p2i(limit), p2i(hr->top()), p2i(hr->end()));
2731 
2732     assert(hr->next_marked_bytes() == 0, "Precondition");
2733 
2734     if (start == limit) {
2735       // NTAMS of this region has not been set so nothing to do.
2736       return false;
2737     }
2738 
2739     // 'start' should be in the heap.
2740     assert(_g1h->is_in_g1_reserved(start) && _ct_bs->is_card_aligned(start), "sanity");
2741     // 'end' *may* be just beyond the end of the heap (if hr is the last region)


3021 
3022 static ReferenceProcessor* get_cm_oop_closure_ref_processor(G1CollectedHeap* g1h) {
3023   ReferenceProcessor* result = NULL;
3024   if (G1UseConcMarkReferenceProcessing) {
3025     result = g1h->ref_processor_cm();
3026     assert(result != NULL, "should not be NULL");
3027   }
3028   return result;
3029 }
3030 
// The base-class reference processor is NULL (reference discovery
// disabled) unless G1UseConcMarkReferenceProcessing is set — see
// get_cm_oop_closure_ref_processor() above.
G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h,
                               ConcurrentMark* cm,
                               CMTask* task)
  : MetadataAwareOopClosure(get_cm_oop_closure_ref_processor(g1h)),
    _g1h(g1h), _cm(cm), _task(task)
{ }
3037 
// Prepare this task to scan the given (claimed, non-NULL) region:
// record it as the current region, reset the local finger to the
// region's bottom and compute the scan limit.
void CMTask::setup_for_region(HeapRegion* hr) {
  assert(hr != NULL,
        "claim_region() should have filtered out NULL regions");

  if (_cm->verbose_low()) {
    gclog_or_tty->print_cr("[%u] setting up for region " PTR_FORMAT,
                           _worker_id, p2i(hr));
  }

  _curr_region  = hr;
  _finger       = hr->bottom();
  update_region_limit();
}
3051 
3052 void CMTask::update_region_limit() {
3053   HeapRegion* hr            = _curr_region;
3054   HeapWord* bottom          = hr->bottom();
3055   HeapWord* limit           = hr->next_top_at_mark_start();
3056 
3057   if (limit == bottom) {
3058     if (_cm->verbose_low()) {
3059       gclog_or_tty->print_cr("[%u] found an empty region "
3060                              "[" PTR_FORMAT ", " PTR_FORMAT ")",


< prev index next >