src/share/vm/gc/g1/g1ConcurrentMark.cpp
Index Unified diffs Context diffs Sdiffs Wdiffs Patch New Old Previous File Next File hotspot Sdiff src/share/vm/gc/g1

src/share/vm/gc/g1/g1ConcurrentMark.cpp

Print this page




 107 void G1CMBitMap::initialize(MemRegion heap, G1RegionToSpaceMapper* storage) {
 108   _bmStartWord = heap.start();
 109   _bmWordSize = heap.word_size();
 110 
 111   _bm.set_map((BitMap::bm_word_t*) storage->reserved().start());
 112   _bm.set_size(_bmWordSize >> _shifter);
 113 
 114   storage->set_mapping_changed_listener(&_listener);
 115 }
 116 
 117 void G1CMBitMapMappingChangedListener::on_commit(uint start_region, size_t num_regions, bool zero_filled) {
 118   if (zero_filled) {
 119     return;
 120   }
 121   // We need to clear the bitmap on commit, removing any existing information.
 122   MemRegion mr(G1CollectedHeap::heap()->bottom_addr_for_region(start_region), num_regions * HeapRegion::GrainWords);
 123   _bm->clearRange(mr);
 124 }
 125 
 126 // Closure used for clearing the given mark bitmap.
 127 class ClearBitmapHRClosure : public HeapRegionClosure {
 128  private:
 129   G1ConcurrentMark* _cm;
 130   G1CMBitMap* _bitmap;
 131   bool _may_yield;      // The closure may yield during iteration. If yielded, abort the iteration.
 132  public:
 133   ClearBitmapHRClosure(G1ConcurrentMark* cm, G1CMBitMap* bitmap, bool may_yield) : HeapRegionClosure(), _cm(cm), _bitmap(bitmap), _may_yield(may_yield) {
 134     assert(!may_yield || cm != NULL, "CM must be non-NULL if this closure is expected to yield.");
 135   }
 136 
 137   virtual bool doHeapRegion(HeapRegion* r) {
 138     size_t const chunk_size_in_words = M / HeapWordSize;
 139 
 140     HeapWord* cur = r->bottom();
 141     HeapWord* const end = r->end();
 142 
 143     while (cur < end) {
 144       MemRegion mr(cur, MIN2(cur + chunk_size_in_words, end));
 145       _bitmap->clearRange(mr);
 146 
 147       cur += chunk_size_in_words;
 148 
 149       // Abort iteration if after yielding the marking has been aborted.
 150       if (_may_yield && _cm->do_yield_check() && _cm->has_aborted()) {
 151         return true;
 152       }
 153       // Repeat the asserts from before the start of the closure. We will do them
 154       // as asserts here to minimize their overhead on the product. However, we
 155       // will have them as guarantees at the beginning / end of the bitmap
 156       // clearing to get some checking in the product.
 157       assert(!_may_yield || _cm->cmThread()->during_cycle(), "invariant");


 709   // marking bitmap and getting it ready for the next cycle. During
 710   // this time no other cycle can start. So, let's make sure that this
 711   // is the case.
 712   guarantee(!g1h->collector_state()->mark_in_progress(), "invariant");
 713 
 714   ClearBitmapHRClosure cl(this, _nextMarkBitMap, true /* may_yield */);
 715   ParClearNextMarkBitmapTask task(&cl, parallel_marking_threads(), true);
 716   _parallel_workers->run_task(&task);
 717 
 718   // Clear the liveness counting data. If the marking has been aborted, the abort()
 719   // call already did that.
 720   if (cl.complete()) {
 721     clear_all_count_data();
 722   }
 723 
 724   // Repeat the asserts from above.
 725   guarantee(cmThread()->during_cycle(), "invariant");
 726   guarantee(!g1h->collector_state()->mark_in_progress(), "invariant");
 727 }
 728 
 729 class CheckBitmapClearHRClosure : public HeapRegionClosure {
 730   G1CMBitMap* _bitmap;
 731   bool _error;
 732  public:
 733   CheckBitmapClearHRClosure(G1CMBitMap* bitmap) : _bitmap(bitmap) {
 734   }
 735 
 736   virtual bool doHeapRegion(HeapRegion* r) {
 737     // This closure can be called concurrently to the mutator, so we must make sure
 738     // that the result of the getNextMarkedWordAddress() call is compared to the
 739     // value passed to it as limit to detect any found bits.
 740     // end never changes in G1.
 741     HeapWord* end = r->end();
 742     return _bitmap->getNextMarkedWordAddress(r->bottom(), end) != end;
 743   }
 744 };
 745 
// Returns true iff no bit is set anywhere in the next mark bitmap.
// CheckBitmapClearHRClosure aborts the iteration at the first set bit it
// finds, leaving the iteration incomplete; so complete() == true means
// every region was scanned without finding a mark.
bool G1ConcurrentMark::nextMarkBitmapIsClear() {
  CheckBitmapClearHRClosure cl(_nextMarkBitMap);
  _g1h->heap_region_iterate(&cl);
  return cl.complete();
}
 751 
// Closure that notifies every heap region that a marking cycle is starting
// (via HeapRegion::note_start_of_marking()); applied to all regions.
class NoteStartOfMarkHRClosure: public HeapRegionClosure {
public:
  bool doHeapRegion(HeapRegion* r) {
    r->note_start_of_marking();
    return false;  // Never abort; all regions must be visited.
  }
};
 759 
 760 void G1ConcurrentMark::checkpointRootsInitialPre() {
 761   G1CollectedHeap*   g1h = G1CollectedHeap::heap();
 762   G1CollectorPolicy* g1p = g1h->g1_policy();
 763 
 764   _has_aborted = false;
 765 
 766   // Initialize marking structures. This has to be done in a STW phase.
 767   reset();
 768 
 769   // For each region note start of marking.
 770   NoteStartOfMarkHRClosure startcl;
 771   g1h->heap_region_iterate(&startcl);
 772 }
 773 
 774 
 775 void G1ConcurrentMark::checkpointRootsInitialPost() {
 776   G1CollectedHeap*   g1h = G1CollectedHeap::heap();


1188 public:
1189   G1CMCountDataClosureBase(G1CollectedHeap* g1h,
1190                            BitMap* region_bm, BitMap* card_bm):
1191     _g1h(g1h), _cm(g1h->concurrent_mark()),
1192     _ct_bs(barrier_set_cast<CardTableModRefBS>(g1h->barrier_set())),
1193     _region_bm(region_bm), _card_bm(card_bm) { }
1194 };
1195 
1196 // Closure that calculates the # live objects per region. Used
1197 // for verification purposes during the cleanup pause.
1198 class CalcLiveObjectsClosure: public G1CMCountDataClosureBase {
1199   G1CMBitMapRO* _bm;
1200   size_t _region_marked_bytes;
1201 
1202 public:
1203   CalcLiveObjectsClosure(G1CMBitMapRO *bm, G1CollectedHeap* g1h,
1204                          BitMap* region_bm, BitMap* card_bm) :
1205     G1CMCountDataClosureBase(g1h, region_bm, card_bm),
1206     _bm(bm), _region_marked_bytes(0) { }
1207 
1208   bool doHeapRegion(HeapRegion* hr) {
1209     HeapWord* ntams = hr->next_top_at_mark_start();
1210     HeapWord* start = hr->bottom();
1211 
1212     assert(start <= hr->end() && start <= ntams && ntams <= hr->end(),
1213            "Preconditions not met - "
1214            "start: " PTR_FORMAT ", ntams: " PTR_FORMAT ", end: " PTR_FORMAT,
1215            p2i(start), p2i(ntams), p2i(hr->end()));
1216 
1217     // Find the first marked object at or after "start".
1218     start = _bm->getNextMarkedWordAddress(start, ntams);
1219 
1220     size_t marked_bytes = 0;
1221 
1222     while (start < ntams) {
1223       oop obj = oop(start);
1224       int obj_sz = obj->size();
1225       HeapWord* obj_end = start + obj_sz;
1226 
1227       BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
1228       BitMap::idx_t end_idx = _cm->card_bitmap_index_for(obj_end);


1264       // just beyond the heap.
1265       if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) {
1266         // end of object is not card aligned - increment to cover
1267         // all the cards spanned by the object
1268         end_idx += 1;
1269       }
1270       _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);
1271 
1272       // This definitely means the region has live objects.
1273       set_bit_for_region(hr);
1274     }
1275 
1276     // Update the live region bitmap.
1277     if (marked_bytes > 0) {
1278       set_bit_for_region(hr);
1279     }
1280 
1281     // Set the marked bytes for the current region so that
1282     // it can be queried by a calling verification routine
1283     _region_marked_bytes = marked_bytes;
1284 
1285     return false;
1286   }
1287 
1288   size_t region_marked_bytes() const { return _region_marked_bytes; }
1289 };
1290 
1291 // Heap region closure used for verifying the counting data
1292 // that was accumulated concurrently and aggregated during
1293 // the remark pause. This closure is applied to the heap
1294 // regions during the STW cleanup pause.
1295 
1296 class VerifyLiveObjectDataHRClosure: public HeapRegionClosure {
1297   G1CollectedHeap* _g1h;
1298   G1ConcurrentMark* _cm;
1299   CalcLiveObjectsClosure _calc_cl;
1300   BitMap* _region_bm;   // Region BM to be verified
1301   BitMap* _card_bm;     // Card BM to be verified
1302 
1303   BitMap* _exp_region_bm; // Expected Region BM values
1304   BitMap* _exp_card_bm;   // Expected card BM values
1305 
1306   int _failures;
1307 
1308 public:
1309   VerifyLiveObjectDataHRClosure(G1CollectedHeap* g1h,
1310                                 BitMap* region_bm,
1311                                 BitMap* card_bm,
1312                                 BitMap* exp_region_bm,
1313                                 BitMap* exp_card_bm) :
1314     _g1h(g1h), _cm(g1h->concurrent_mark()),
1315     _calc_cl(_cm->nextMarkBitMap(), g1h, exp_region_bm, exp_card_bm),
1316     _region_bm(region_bm), _card_bm(card_bm),
1317     _exp_region_bm(exp_region_bm), _exp_card_bm(exp_card_bm),
1318     _failures(0) { }
1319 
1320   int failures() const { return _failures; }
1321 
1322   bool doHeapRegion(HeapRegion* hr) {
1323     int failures = 0;
1324 
1325     // Call the CalcLiveObjectsClosure to walk the marking bitmap for
1326     // this region and set the corresponding bits in the expected region
1327     // and card bitmaps.
1328     bool res = _calc_cl.doHeapRegion(hr);
1329     assert(res == false, "should be continuing");
1330 
1331     // Verify the marked bytes for this region.
1332     size_t exp_marked_bytes = _calc_cl.region_marked_bytes();
1333     size_t act_marked_bytes = hr->next_marked_bytes();
1334 
1335     if (exp_marked_bytes > act_marked_bytes) {
1336       if (hr->is_starts_humongous()) {
1337         // For start_humongous regions, the size of the whole object will be
1338         // in exp_marked_bytes.
1339         HeapRegion* region = hr;
1340         int num_regions;
1341         for (num_regions = 0; region != NULL; num_regions++) {
1342           region = _g1h->next_region_in_humongous(region);
1343         }
1344         if ((num_regions-1) * HeapRegion::GrainBytes >= exp_marked_bytes) {
1345           failures += 1;
1346         } else if (num_regions * HeapRegion::GrainBytes < exp_marked_bytes) {
1347           failures += 1;
1348         }
1349       } else {


1365       failures += 1;
1366     }
1367 
1368     // Verify that the card bit maps for the cards spanned by the current
1369     // region match. We have an error if we have a set bit in the expected
1370     // bit map and the corresponding bit in the actual bitmap is not set.
1371 
1372     BitMap::idx_t start_idx = _cm->card_bitmap_index_for(hr->bottom());
1373     BitMap::idx_t end_idx = _cm->card_bitmap_index_for(hr->top());
1374 
1375     for (BitMap::idx_t i = start_idx; i < end_idx; i+=1) {
1376       expected = _exp_card_bm->at(i);
1377       actual = _card_bm->at(i);
1378 
1379       if (expected && !actual) {
1380         failures += 1;
1381       }
1382     }
1383 
1384     _failures += failures;
1385 
1386     // We could stop iteration over the heap when we
1387     // find the first violating region by returning true.
1388     return false;
1389   }
1390 };
1391 
1392 class G1ParVerifyFinalCountTask: public AbstractGangTask {
1393 protected:
1394   G1CollectedHeap* _g1h;
1395   G1ConcurrentMark* _cm;
1396   BitMap* _actual_region_bm;
1397   BitMap* _actual_card_bm;
1398 
1399   uint    _n_workers;
1400 
1401   BitMap* _expected_region_bm;
1402   BitMap* _expected_card_bm;
1403 
1404   int  _failures;
1405 
1406   HeapRegionClaimer _hrclaimer;
1407 
1408 public:


1433     Atomic::add(verify_cl.failures(), &_failures);
1434   }
1435 
1436   int failures() const { return _failures; }
1437 };
1438 
// Closure that finalizes the liveness counting data.
// Used during the cleanup pause.
// Sets the bits corresponding to the interval [NTAMS, top)
// (which contains the implicitly live objects) in the
// card liveness bitmap. Also sets the bit for each region
// containing live data in the region liveness bitmap.

class FinalCountDataUpdateClosure: public G1CMCountDataClosureBase {
 public:
  FinalCountDataUpdateClosure(G1CollectedHeap* g1h,
                              BitMap* region_bm,
                              BitMap* card_bm) :
    G1CMCountDataClosureBase(g1h, region_bm, card_bm) { }

  bool doHeapRegion(HeapRegion* hr) {
    // Objects allocated in [NTAMS, top) since marking started are
    // implicitly live; account for them here.
    HeapWord* ntams = hr->next_top_at_mark_start();
    HeapWord* top   = hr->top();

    assert(hr->bottom() <= ntams && ntams <= hr->end(), "Preconditions.");

    // Mark the allocated-since-marking portion...
    if (ntams < top) {
      // This definitely means the region has live objects.
      set_bit_for_region(hr);

      // Now set the bits in the card bitmap for [ntams, top)
      BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams);
      BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top);

      // Note: if we're looking at the last region in heap - top
      // could be actually just beyond the end of the heap; end_idx
      // will then correspond to a (non-existent) card that is also
      // just beyond the heap.
      if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) {
        // end of object is not card aligned - increment to cover
        // all the cards spanned by the object
        end_idx += 1;
      }

      assert(end_idx <= _card_bm->size(),
             "oob: end_idx=  " SIZE_FORMAT ", bitmap size= " SIZE_FORMAT,
             end_idx, _card_bm->size());
      assert(start_idx < _card_bm->size(),
             "oob: start_idx=  " SIZE_FORMAT ", bitmap size= " SIZE_FORMAT,
             start_idx, _card_bm->size());

      _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);
    }

    // Set the bit for the region if it contains live data
    if (hr->next_marked_bytes() > 0) {
      set_bit_for_region(hr);
    }

    return false;  // Never abort; all regions must be processed.
  }
};
1496 
1497 class G1ParFinalCountTask: public AbstractGangTask {
1498 protected:
1499   G1CollectedHeap* _g1h;
1500   G1ConcurrentMark* _cm;
1501   BitMap* _actual_region_bm;
1502   BitMap* _actual_card_bm;
1503 
1504   uint    _n_workers;
1505   HeapRegionClaimer _hrclaimer;
1506 
1507 public:
1508   G1ParFinalCountTask(G1CollectedHeap* g1h, BitMap* region_bm, BitMap* card_bm)
1509     : AbstractGangTask("G1 final counting"),
1510       _g1h(g1h), _cm(_g1h->concurrent_mark()),
1511       _actual_region_bm(region_bm), _actual_card_bm(card_bm),
1512       _n_workers(_g1h->workers()->active_workers()), _hrclaimer(_n_workers) {
1513   }


1529   FreeRegionList* _local_cleanup_list;
1530   uint _old_regions_removed;
1531   uint _humongous_regions_removed;
1532   HRRSCleanupTask* _hrrs_cleanup_task;
1533 
1534 public:
1535   G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1,
1536                              FreeRegionList* local_cleanup_list,
1537                              HRRSCleanupTask* hrrs_cleanup_task) :
1538     _g1(g1),
1539     _freed_bytes(0),
1540     _local_cleanup_list(local_cleanup_list),
1541     _old_regions_removed(0),
1542     _humongous_regions_removed(0),
1543     _hrrs_cleanup_task(hrrs_cleanup_task) { }
1544 
1545   size_t freed_bytes() { return _freed_bytes; }
1546   const uint old_regions_removed() { return _old_regions_removed; }
1547   const uint humongous_regions_removed() { return _humongous_regions_removed; }
1548 
  // Post-marking per-region work: frees regions found completely empty,
  // otherwise queues their remembered sets for cleanup. Freed regions go
  // on the worker-local cleanup list and freed bytes are tallied locally.
  bool doHeapRegion(HeapRegion *hr) {
    if (hr->is_archive()) {
      // Archive regions are never reclaimed.
      return false;
    }
    // NOTE(review): the previous comment here described the old claim-value
    // scheme ("claim value of zero ... value 1 in the FinalCount task");
    // region claiming is now handled by HeapRegionClaimer in the caller.
    _g1->reset_gc_time_stamps(hr);
    hr->note_end_of_marking();

    // Used but with no live bytes after marking (per max_live_bytes(), which
    // presumably reflects the marking results — verify), and not young:
    // the whole region can be freed.
    if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young()) {
      _freed_bytes += hr->used();
      hr->set_containing_set(NULL);
      if (hr->is_humongous()) {
        _humongous_regions_removed++;
        _g1->free_humongous_region(hr, _local_cleanup_list, true);
      } else {
        _old_regions_removed++;
        _g1->free_region(hr, _local_cleanup_list, true);
      }
    } else {
      // Region survives; scrub its remembered set as part of cleanup.
      hr->rem_set()->do_cleanup_work(_hrrs_cleanup_task);
    }

    return false;  // Never abort the iteration.
  }
1574 };
1575 
1576 class G1ParNoteEndTask: public AbstractGangTask {
1577   friend class G1NoteEndOfConcMarkClosure;
1578 
1579 protected:
1580   G1CollectedHeap* _g1h;
1581   FreeRegionList* _cleanup_list;
1582   HeapRegionClaimer _hrclaimer;
1583 
1584 public:
1585   G1ParNoteEndTask(G1CollectedHeap* g1h, FreeRegionList* cleanup_list, uint n_workers) :
1586       AbstractGangTask("G1 note end"), _g1h(g1h), _cleanup_list(cleanup_list), _hrclaimer(n_workers) {
1587   }
1588 
1589   void work(uint worker_id) {
1590     FreeRegionList local_cleanup_list("Local Cleanup List");
1591     HRRSCleanupTask hrrs_cleanup_task;
1592     G1NoteEndOfConcMarkClosure g1_note_end(_g1h, &local_cleanup_list,
1593                                            &hrrs_cleanup_task);
1594     _g1h->heap_region_par_iterate(&g1_note_end, worker_id, &_hrclaimer);
1595     assert(g1_note_end.complete(), "Shouldn't have yielded!");
1596 
1597     // Now update the lists
1598     _g1h->remove_from_old_sets(g1_note_end.old_regions_removed(), g1_note_end.humongous_regions_removed());
1599     {
1600       MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
1601       _g1h->decrement_summary_bytes(g1_note_end.freed_bytes());
1602 
1603       // If we iterate over the global cleanup list at the end of
1604       // cleanup to do this printing we will not guarantee to only
1605       // generate output for the newly-reclaimed regions (the list
1606       // might not be empty at the beginning of cleanup; we might
1607       // still be working on its previous contents). So we do the
1608       // printing here, before we append the new regions to the global
1609       // cleanup list.
1610 
1611       G1HRPrinter* hr_printer = _g1h->hr_printer();
1612       if (hr_printer->is_active()) {
1613         FreeRegionListIterator iter(&local_cleanup_list);
1614         while (iter.more_available()) {
1615           HeapRegion* hr = iter.get_next();


2424 }
2425 #endif // PRODUCT
2426 
2427 // Aggregate the counting data that was constructed concurrently
2428 // with marking.
2429 class AggregateCountDataHRClosure: public HeapRegionClosure {
2430   G1CollectedHeap* _g1h;
2431   G1ConcurrentMark* _cm;
2432   CardTableModRefBS* _ct_bs;
2433   BitMap* _cm_card_bm;
2434   uint _max_worker_id;
2435 
2436  public:
2437   AggregateCountDataHRClosure(G1CollectedHeap* g1h,
2438                               BitMap* cm_card_bm,
2439                               uint max_worker_id) :
2440     _g1h(g1h), _cm(g1h->concurrent_mark()),
2441     _ct_bs(barrier_set_cast<CardTableModRefBS>(g1h->barrier_set())),
2442     _cm_card_bm(cm_card_bm), _max_worker_id(max_worker_id) { }
2443 
2444   bool doHeapRegion(HeapRegion* hr) {
2445     HeapWord* start = hr->bottom();
2446     HeapWord* limit = hr->next_top_at_mark_start();
2447     HeapWord* end = hr->end();
2448 
2449     assert(start <= limit && limit <= hr->top() && hr->top() <= hr->end(),
2450            "Preconditions not met - "
2451            "start: " PTR_FORMAT ", limit: " PTR_FORMAT ", "
2452            "top: " PTR_FORMAT ", end: " PTR_FORMAT,
2453            p2i(start), p2i(limit), p2i(hr->top()), p2i(hr->end()));
2454 
2455     assert(hr->next_marked_bytes() == 0, "Precondition");
2456 
2457     if (start == limit) {
2458       // NTAMS of this region has not been set so nothing to do.
2459       return false;
2460     }
2461 
2462     // 'start' should be in the heap.
2463     assert(_g1h->is_in_g1_reserved(start) && _ct_bs->is_card_aligned(start), "sanity");
2464     // 'end' *may* be just beyond the end of the heap (if hr is the last region)
2465     assert(!_g1h->is_in_g1_reserved(end) || _ct_bs->is_card_aligned(end), "sanity");
2466 
2467     BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
2468     BitMap::idx_t limit_idx = _cm->card_bitmap_index_for(limit);
2469     BitMap::idx_t end_idx = _cm->card_bitmap_index_for(end);
2470 
2471     // If ntams is not card aligned then we bump card bitmap index
2472     // for limit so that we get the all the cards spanned by
2473     // the object ending at ntams.
2474     // Note: if this is the last region in the heap then ntams
2475     // could be actually just beyond the end of the the heap;
2476     // limit_idx will then  correspond to a (non-existent) card
2477     // that is also outside the heap.
2478     if (_g1h->is_in_g1_reserved(limit) && !_ct_bs->is_card_aligned(limit)) {
2479       limit_idx += 1;


2497       // into the global card bitmap.
2498       BitMap::idx_t scan_idx = task_card_bm->get_next_one_offset(start_idx, limit_idx);
2499 
2500       while (scan_idx < limit_idx) {
2501         assert(task_card_bm->at(scan_idx) == true, "should be");
2502         _cm_card_bm->set_bit(scan_idx);
2503         assert(_cm_card_bm->at(scan_idx) == true, "should be");
2504 
2505         // BitMap::get_next_one_offset() can handle the case when
2506         // its left_offset parameter is greater than its right_offset
2507         // parameter. It does, however, have an early exit if
2508         // left_offset == right_offset. So let's limit the value
2509         // passed in for left offset here.
2510         BitMap::idx_t next_idx = MIN2(scan_idx + 1, limit_idx);
2511         scan_idx = task_card_bm->get_next_one_offset(next_idx, limit_idx);
2512       }
2513     }
2514 
2515     // Update the marked bytes for this region.
2516     hr->add_to_marked_bytes(marked_bytes);
2517 
2518     // Next heap region
2519     return false;
2520   }
2521 };
2522 
2523 class G1AggregateCountDataTask: public AbstractGangTask {
2524 protected:
2525   G1CollectedHeap* _g1h;
2526   G1ConcurrentMark* _cm;
2527   BitMap* _cm_card_bm;
2528   uint _max_worker_id;
2529   uint _active_workers;
2530   HeapRegionClaimer _hrclaimer;
2531 
2532 public:
2533   G1AggregateCountDataTask(G1CollectedHeap* g1h,
2534                            G1ConcurrentMark* cm,
2535                            BitMap* cm_card_bm,
2536                            uint max_worker_id,
2537                            uint n_workers) :
2538       AbstractGangTask("Count Aggregation"),
2539       _g1h(g1h), _cm(cm), _cm_card_bm(cm_card_bm),


3608     *hum_bytes -= bytes;
3609   }
3610   return bytes;
3611 }
3612 
// It deduces the values for a region in a humongous region series
// from the _hum_* fields and updates those accordingly. It assumes
// that the _hum_* fields have already been set up from the "starts
// humongous" region and we visit the regions in address order.
void G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* used_bytes,
                                                     size_t* capacity_bytes,
                                                     size_t* prev_live_bytes,
                                                     size_t* next_live_bytes) {
  assert(_hum_used_bytes > 0 && _hum_capacity_bytes > 0, "pre-condition");
  // Each call drains one region's worth from the corresponding _hum_* field
  // via the single-argument get_hum_bytes() overload.
  *used_bytes      = get_hum_bytes(&_hum_used_bytes);
  *capacity_bytes  = get_hum_bytes(&_hum_capacity_bytes);
  *prev_live_bytes = get_hum_bytes(&_hum_prev_live_bytes);
  *next_live_bytes = get_hum_bytes(&_hum_next_live_bytes);
}
3627 
3628 bool G1PrintRegionLivenessInfoClosure::doHeapRegion(HeapRegion* r) {
3629   const char* type       = r->get_type_str();
3630   HeapWord* bottom       = r->bottom();
3631   HeapWord* end          = r->end();
3632   size_t capacity_bytes  = r->capacity();
3633   size_t used_bytes      = r->used();
3634   size_t prev_live_bytes = r->live_bytes();
3635   size_t next_live_bytes = r->next_live_bytes();
3636   double gc_eff          = r->gc_efficiency();
3637   size_t remset_bytes    = r->rem_set()->mem_size();
3638   size_t strong_code_roots_bytes = r->rem_set()->strong_code_roots_mem_size();
3639 
3640   if (r->is_starts_humongous()) {
3641     assert(_hum_used_bytes == 0 && _hum_capacity_bytes == 0 &&
3642            _hum_prev_live_bytes == 0 && _hum_next_live_bytes == 0,
3643            "they should have been zeroed after the last time we used them");
3644     // Set up the _hum_* fields.
3645     _hum_capacity_bytes  = capacity_bytes;
3646     _hum_used_bytes      = used_bytes;
3647     _hum_prev_live_bytes = prev_live_bytes;
3648     _hum_next_live_bytes = next_live_bytes;


3658   _total_used_bytes      += used_bytes;
3659   _total_capacity_bytes  += capacity_bytes;
3660   _total_prev_live_bytes += prev_live_bytes;
3661   _total_next_live_bytes += next_live_bytes;
3662   _total_remset_bytes    += remset_bytes;
3663   _total_strong_code_roots_bytes += strong_code_roots_bytes;
3664 
3665   // Print a line for this particular region.
3666   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
3667                           G1PPRL_TYPE_FORMAT
3668                           G1PPRL_ADDR_BASE_FORMAT
3669                           G1PPRL_BYTE_FORMAT
3670                           G1PPRL_BYTE_FORMAT
3671                           G1PPRL_BYTE_FORMAT
3672                           G1PPRL_DOUBLE_FORMAT
3673                           G1PPRL_BYTE_FORMAT
3674                           G1PPRL_BYTE_FORMAT,
3675                           type, p2i(bottom), p2i(end),
3676                           used_bytes, prev_live_bytes, next_live_bytes, gc_eff,
3677                           remset_bytes, strong_code_roots_bytes);
3678 
3679   return false;
3680 }
3681 
3682 G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() {
3683   // add static memory usages to remembered set sizes
3684   _total_remset_bytes += HeapRegionRemSet::fl_mem_size() + HeapRegionRemSet::static_mem_size();
3685   // Print the footer of the output.
3686   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX);
3687   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
3688                          " SUMMARY"
3689                          G1PPRL_SUM_MB_FORMAT("capacity")
3690                          G1PPRL_SUM_MB_PERC_FORMAT("used")
3691                          G1PPRL_SUM_MB_PERC_FORMAT("prev-live")
3692                          G1PPRL_SUM_MB_PERC_FORMAT("next-live")
3693                          G1PPRL_SUM_MB_FORMAT("remset")
3694                          G1PPRL_SUM_MB_FORMAT("code-roots"),
3695                          bytes_to_mb(_total_capacity_bytes),
3696                          bytes_to_mb(_total_used_bytes),
3697                          perc(_total_used_bytes, _total_capacity_bytes),
3698                          bytes_to_mb(_total_prev_live_bytes),
3699                          perc(_total_prev_live_bytes, _total_capacity_bytes),


 107 void G1CMBitMap::initialize(MemRegion heap, G1RegionToSpaceMapper* storage) {
 108   _bmStartWord = heap.start();
 109   _bmWordSize = heap.word_size();
 110 
 111   _bm.set_map((BitMap::bm_word_t*) storage->reserved().start());
 112   _bm.set_size(_bmWordSize >> _shifter);
 113 
 114   storage->set_mapping_changed_listener(&_listener);
 115 }
 116 
 117 void G1CMBitMapMappingChangedListener::on_commit(uint start_region, size_t num_regions, bool zero_filled) {
 118   if (zero_filled) {
 119     return;
 120   }
 121   // We need to clear the bitmap on commit, removing any existing information.
 122   MemRegion mr(G1CollectedHeap::heap()->bottom_addr_for_region(start_region), num_regions * HeapRegion::GrainWords);
 123   _bm->clearRange(mr);
 124 }
 125 
 126 // Closure used for clearing the given mark bitmap.
 127 class ClearBitmapHRClosure : public AbortableHeapRegionClosure {
 128  private:
 129   G1ConcurrentMark* _cm;
 130   G1CMBitMap* _bitmap;
 131   bool _may_yield;      // The closure may yield during iteration. If yielded, abort the iteration.
 132  public:
 133   ClearBitmapHRClosure(G1ConcurrentMark* cm, G1CMBitMap* bitmap, bool may_yield) : AbortableHeapRegionClosure(), _cm(cm), _bitmap(bitmap), _may_yield(may_yield) {
 134     assert(!may_yield || cm != NULL, "CM must be non-NULL if this closure is expected to yield.");
 135   }
 136 
 137   virtual bool doHeapRegionAbortable(HeapRegion* r) {
 138     size_t const chunk_size_in_words = M / HeapWordSize;
 139 
 140     HeapWord* cur = r->bottom();
 141     HeapWord* const end = r->end();
 142 
 143     while (cur < end) {
 144       MemRegion mr(cur, MIN2(cur + chunk_size_in_words, end));
 145       _bitmap->clearRange(mr);
 146 
 147       cur += chunk_size_in_words;
 148 
 149       // Abort iteration if after yielding the marking has been aborted.
 150       if (_may_yield && _cm->do_yield_check() && _cm->has_aborted()) {
 151         return true;
 152       }
 153       // Repeat the asserts from before the start of the closure. We will do them
 154       // as asserts here to minimize their overhead on the product. However, we
 155       // will have them as guarantees at the beginning / end of the bitmap
 156       // clearing to get some checking in the product.
 157       assert(!_may_yield || _cm->cmThread()->during_cycle(), "invariant");


 709   // marking bitmap and getting it ready for the next cycle. During
 710   // this time no other cycle can start. So, let's make sure that this
 711   // is the case.
 712   guarantee(!g1h->collector_state()->mark_in_progress(), "invariant");
 713 
 714   ClearBitmapHRClosure cl(this, _nextMarkBitMap, true /* may_yield */);
 715   ParClearNextMarkBitmapTask task(&cl, parallel_marking_threads(), true);
 716   _parallel_workers->run_task(&task);
 717 
 718   // Clear the liveness counting data. If the marking has been aborted, the abort()
 719   // call already did that.
 720   if (cl.complete()) {
 721     clear_all_count_data();
 722   }
 723 
 724   // Repeat the asserts from above.
 725   guarantee(cmThread()->during_cycle(), "invariant");
 726   guarantee(!g1h->collector_state()->mark_in_progress(), "invariant");
 727 }
 728 
 729 class CheckBitmapClearHRClosure : public AbortableHeapRegionClosure {
 730   G1CMBitMap* _bitmap;
 731   bool _error;
 732  public:
 733   CheckBitmapClearHRClosure(G1CMBitMap* bitmap) : _bitmap(bitmap) {
 734   }
 735 
 736   virtual bool doHeapRegionAbortable(HeapRegion* r) {
 737     // This closure can be called concurrently to the mutator, so we must make sure
 738     // that the result of the getNextMarkedWordAddress() call is compared to the
 739     // value passed to it as limit to detect any found bits.
 740     // end never changes in G1.
 741     HeapWord* end = r->end();
 742     return _bitmap->getNextMarkedWordAddress(r->bottom(), end) != end;
 743   }
 744 };
 745 
// Returns true iff no bit is set anywhere in the next mark bitmap.
// CheckBitmapClearHRClosure aborts the iteration at the first set bit it
// finds, leaving the iteration incomplete; so complete() == true means
// every region was scanned without finding a mark.
bool G1ConcurrentMark::nextMarkBitmapIsClear() {
  CheckBitmapClearHRClosure cl(_nextMarkBitMap);
  _g1h->heap_region_iterate(&cl);
  return cl.complete();
}
 751 
// Closure that notifies every heap region that a marking cycle is starting
// (via HeapRegion::note_start_of_marking()); applied to all regions.
class NoteStartOfMarkHRClosure: public HeapRegionClosure {
public:
  void doHeapRegion(HeapRegion* r) {
    r->note_start_of_marking();
  }
};
 758 
 759 void G1ConcurrentMark::checkpointRootsInitialPre() {
 760   G1CollectedHeap*   g1h = G1CollectedHeap::heap();
 761   G1CollectorPolicy* g1p = g1h->g1_policy();
 762 
 763   _has_aborted = false;
 764 
 765   // Initialize marking structures. This has to be done in a STW phase.
 766   reset();
 767 
 768   // For each region note start of marking.
 769   NoteStartOfMarkHRClosure startcl;
 770   g1h->heap_region_iterate(&startcl);
 771 }
 772 
 773 
 774 void G1ConcurrentMark::checkpointRootsInitialPost() {
 775   G1CollectedHeap*   g1h = G1CollectedHeap::heap();


1187 public:
1188   G1CMCountDataClosureBase(G1CollectedHeap* g1h,
1189                            BitMap* region_bm, BitMap* card_bm):
1190     _g1h(g1h), _cm(g1h->concurrent_mark()),
1191     _ct_bs(barrier_set_cast<CardTableModRefBS>(g1h->barrier_set())),
1192     _region_bm(region_bm), _card_bm(card_bm) { }
1193 };
1194 
1195 // Closure that calculates the # live objects per region. Used
1196 // for verification purposes during the cleanup pause.
1197 class CalcLiveObjectsClosure: public G1CMCountDataClosureBase {
1198   G1CMBitMapRO* _bm;
1199   size_t _region_marked_bytes;
1200 
1201 public:
1202   CalcLiveObjectsClosure(G1CMBitMapRO *bm, G1CollectedHeap* g1h,
1203                          BitMap* region_bm, BitMap* card_bm) :
1204     G1CMCountDataClosureBase(g1h, region_bm, card_bm),
1205     _bm(bm), _region_marked_bytes(0) { }
1206 
1207   void doHeapRegion(HeapRegion* hr) {
1208     HeapWord* ntams = hr->next_top_at_mark_start();
1209     HeapWord* start = hr->bottom();
1210 
1211     assert(start <= hr->end() && start <= ntams && ntams <= hr->end(),
1212            "Preconditions not met - "
1213            "start: " PTR_FORMAT ", ntams: " PTR_FORMAT ", end: " PTR_FORMAT,
1214            p2i(start), p2i(ntams), p2i(hr->end()));
1215 
1216     // Find the first marked object at or after "start".
1217     start = _bm->getNextMarkedWordAddress(start, ntams);
1218 
1219     size_t marked_bytes = 0;
1220 
1221     while (start < ntams) {
1222       oop obj = oop(start);
1223       int obj_sz = obj->size();
1224       HeapWord* obj_end = start + obj_sz;
1225 
1226       BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
1227       BitMap::idx_t end_idx = _cm->card_bitmap_index_for(obj_end);


1263       // just beyond the heap.
1264       if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) {
1265         // end of object is not card aligned - increment to cover
1266         // all the cards spanned by the object
1267         end_idx += 1;
1268       }
1269       _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);
1270 
1271       // This definitely means the region has live objects.
1272       set_bit_for_region(hr);
1273     }
1274 
1275     // Update the live region bitmap.
1276     if (marked_bytes > 0) {
1277       set_bit_for_region(hr);
1278     }
1279 
1280     // Set the marked bytes for the current region so that
1281     // it can be queried by a calling verification routine
1282     _region_marked_bytes = marked_bytes;


1283   }
1284 
1285   size_t region_marked_bytes() const { return _region_marked_bytes; }
1286 };
1287 
1288 // Heap region closure used for verifying the counting data
1289 // that was accumulated concurrently and aggregated during
1290 // the remark pause. This closure is applied to the heap
1291 // regions during the STW cleanup pause.
1292 
1293 class VerifyLiveObjectDataHRClosure: public HeapRegionClosure {
1294   G1CollectedHeap* _g1h;
1295   G1ConcurrentMark* _cm;
1296   CalcLiveObjectsClosure _calc_cl;
1297   BitMap* _region_bm;   // Region BM to be verified
1298   BitMap* _card_bm;     // Card BM to be verified
1299 
1300   BitMap* _exp_region_bm; // Expected Region BM values
1301   BitMap* _exp_card_bm;   // Expected card BM values
1302 
1303   int _failures;
1304 
1305 public:
1306   VerifyLiveObjectDataHRClosure(G1CollectedHeap* g1h,
1307                                 BitMap* region_bm,
1308                                 BitMap* card_bm,
1309                                 BitMap* exp_region_bm,
1310                                 BitMap* exp_card_bm) :
1311     _g1h(g1h), _cm(g1h->concurrent_mark()),
1312     _calc_cl(_cm->nextMarkBitMap(), g1h, exp_region_bm, exp_card_bm),
1313     _region_bm(region_bm), _card_bm(card_bm),
1314     _exp_region_bm(exp_region_bm), _exp_card_bm(exp_card_bm),
1315     _failures(0) { }
1316 
1317   int failures() const { return _failures; }
1318 
1319   void doHeapRegion(HeapRegion* hr) {
1320     int failures = 0;
1321 
1322     // Call the CalcLiveObjectsClosure to walk the marking bitmap for
1323     // this region and set the corresponding bits in the expected region
1324     // and card bitmaps.
1325     _calc_cl.doHeapRegion(hr);

1326 
1327     // Verify the marked bytes for this region.
1328     size_t exp_marked_bytes = _calc_cl.region_marked_bytes();
1329     size_t act_marked_bytes = hr->next_marked_bytes();
1330 
1331     if (exp_marked_bytes > act_marked_bytes) {
1332       if (hr->is_starts_humongous()) {
1333         // For start_humongous regions, the size of the whole object will be
1334         // in exp_marked_bytes.
1335         HeapRegion* region = hr;
1336         int num_regions;
1337         for (num_regions = 0; region != NULL; num_regions++) {
1338           region = _g1h->next_region_in_humongous(region);
1339         }
1340         if ((num_regions-1) * HeapRegion::GrainBytes >= exp_marked_bytes) {
1341           failures += 1;
1342         } else if (num_regions * HeapRegion::GrainBytes < exp_marked_bytes) {
1343           failures += 1;
1344         }
1345       } else {


1361       failures += 1;
1362     }
1363 
1364     // Verify that the card bit maps for the cards spanned by the current
1365     // region match. We have an error if we have a set bit in the expected
1366     // bit map and the corresponding bit in the actual bitmap is not set.
1367 
1368     BitMap::idx_t start_idx = _cm->card_bitmap_index_for(hr->bottom());
1369     BitMap::idx_t end_idx = _cm->card_bitmap_index_for(hr->top());
1370 
1371     for (BitMap::idx_t i = start_idx; i < end_idx; i+=1) {
1372       expected = _exp_card_bm->at(i);
1373       actual = _card_bm->at(i);
1374 
1375       if (expected && !actual) {
1376         failures += 1;
1377       }
1378     }
1379 
1380     _failures += failures;




1381   }
1382 };
1383 
1384 class G1ParVerifyFinalCountTask: public AbstractGangTask {
1385 protected:
1386   G1CollectedHeap* _g1h;
1387   G1ConcurrentMark* _cm;
1388   BitMap* _actual_region_bm;
1389   BitMap* _actual_card_bm;
1390 
1391   uint    _n_workers;
1392 
1393   BitMap* _expected_region_bm;
1394   BitMap* _expected_card_bm;
1395 
1396   int  _failures;
1397 
1398   HeapRegionClaimer _hrclaimer;
1399 
1400 public:


1425     Atomic::add(verify_cl.failures(), &_failures);
1426   }
1427 
1428   int failures() const { return _failures; }
1429 };
1430 
// Closure that finalizes the liveness counting data.
// Used during the cleanup pause.
// Sets the bits corresponding to the interval [NTAMS, top]
// (which contains the implicitly live objects) in the
// card liveness bitmap. Also sets the bit for each region,
// containing live data, in the region liveness bitmap.

class FinalCountDataUpdateClosure: public G1CMCountDataClosureBase {
 public:
  // region_bm/card_bm are the liveness bitmaps being finalized; both
  // are handed straight to the base class.
  FinalCountDataUpdateClosure(G1CollectedHeap* g1h,
                              BitMap* region_bm,
                              BitMap* card_bm) :
    G1CMCountDataClosureBase(g1h, region_bm, card_bm) { }

  void doHeapRegion(HeapRegion* hr) {
    HeapWord* ntams = hr->next_top_at_mark_start();
    HeapWord* top   = hr->top();

    assert(hr->bottom() <= ntams && ntams <= hr->end(), "Preconditions.");

    // Mark the allocated-since-marking portion...
    // Objects in [ntams, top) were allocated after NTAMS was fixed and
    // are treated as implicitly live.
    if (ntams < top) {
      // This definitely means the region has live objects.
      set_bit_for_region(hr);

      // Now set the bits in the card bitmap for [ntams, top)
      BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams);
      BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top);

      // Note: if we're looking at the last region in heap - top
      // could be actually just beyond the end of the heap; end_idx
      // will then correspond to a (non-existent) card that is also
      // just beyond the heap.
      if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) {
        // end of object is not card aligned - increment to cover
        // all the cards spanned by the object
        end_idx += 1;
      }

      // end_idx may legitimately equal the bitmap size (exclusive
      // bound); start_idx must be a valid (strictly in-range) index.
      assert(end_idx <= _card_bm->size(),
             "oob: end_idx=  " SIZE_FORMAT ", bitmap size= " SIZE_FORMAT,
             end_idx, _card_bm->size());
      assert(start_idx < _card_bm->size(),
             "oob: start_idx=  " SIZE_FORMAT ", bitmap size= " SIZE_FORMAT,
             start_idx, _card_bm->size());

      _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);
    }

    // Set the bit for the region if it contains live data
    // (next_marked_bytes() covers data found live by marking below
    // NTAMS, independent of the implicitly-live range handled above).
    if (hr->next_marked_bytes() > 0) {
      set_bit_for_region(hr);
    }
  }
};
1486 
1487 class G1ParFinalCountTask: public AbstractGangTask {
1488 protected:
1489   G1CollectedHeap* _g1h;
1490   G1ConcurrentMark* _cm;
1491   BitMap* _actual_region_bm;
1492   BitMap* _actual_card_bm;
1493 
1494   uint    _n_workers;
1495   HeapRegionClaimer _hrclaimer;
1496 
1497 public:
1498   G1ParFinalCountTask(G1CollectedHeap* g1h, BitMap* region_bm, BitMap* card_bm)
1499     : AbstractGangTask("G1 final counting"),
1500       _g1h(g1h), _cm(_g1h->concurrent_mark()),
1501       _actual_region_bm(region_bm), _actual_card_bm(card_bm),
1502       _n_workers(_g1h->workers()->active_workers()), _hrclaimer(_n_workers) {
1503   }


1519   FreeRegionList* _local_cleanup_list;
1520   uint _old_regions_removed;
1521   uint _humongous_regions_removed;
1522   HRRSCleanupTask* _hrrs_cleanup_task;
1523 
1524 public:
  // g1:                 the heap being cleaned up.
  // local_cleanup_list: thread-local list that regions freed by this
  //                     closure are appended to.
  // hrrs_cleanup_task:  per-thread remembered-set cleanup work for
  //                     regions that are not freed.
  G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1,
                             FreeRegionList* local_cleanup_list,
                             HRRSCleanupTask* hrrs_cleanup_task) :
    _g1(g1),
    _freed_bytes(0),
    _local_cleanup_list(local_cleanup_list),
    _old_regions_removed(0),
    _humongous_regions_removed(0),
    _hrrs_cleanup_task(hrrs_cleanup_task) { }
1534 
1535   size_t freed_bytes() { return _freed_bytes; }
1536   const uint old_regions_removed() { return _old_regions_removed; }
1537   const uint humongous_regions_removed() { return _humongous_regions_removed; }
1538 
  // Called for each region at the end of concurrent marking: records
  // the end of marking on the region and immediately frees it if
  // marking found no live data; otherwise schedules remembered-set
  // cleanup work for it.
  void doHeapRegion(HeapRegion *hr) {
    // Archive regions are never reclaimed here.
    if (hr->is_archive()) {
      return;
    }
    // We use a claim value of zero here because all regions
    // were claimed with value 1 in the FinalCount task.
    _g1->reset_gc_time_stamps(hr);
    hr->note_end_of_marking();

    // A region is reclaimable if it is in use, marking found nothing
    // live in it, and it is not young.
    if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young()) {
      _freed_bytes += hr->used();
      hr->set_containing_set(NULL);
      if (hr->is_humongous()) {
        _humongous_regions_removed++;
        _g1->free_humongous_region(hr, _local_cleanup_list, true);
      } else {
        _old_regions_removed++;
        _g1->free_region(hr, _local_cleanup_list, true);
      }
    } else {
      // Region stays; contribute its remembered set to the per-thread
      // cleanup task instead.
      hr->rem_set()->do_cleanup_work(_hrrs_cleanup_task);
    }
  }
1562 };
1563 
1564 class G1ParNoteEndTask: public AbstractGangTask {
1565   friend class G1NoteEndOfConcMarkClosure;
1566 
1567 protected:
1568   G1CollectedHeap* _g1h;
1569   FreeRegionList* _cleanup_list;
1570   HeapRegionClaimer _hrclaimer;
1571 
1572 public:
  // cleanup_list receives the regions freed across all workers;
  // n_workers sizes the HeapRegionClaimer used to partition the heap
  // among the parallel workers.
  G1ParNoteEndTask(G1CollectedHeap* g1h, FreeRegionList* cleanup_list, uint n_workers) :
      AbstractGangTask("G1 note end"), _g1h(g1h), _cleanup_list(cleanup_list), _hrclaimer(n_workers) {
  }
1576 
1577   void work(uint worker_id) {
1578     FreeRegionList local_cleanup_list("Local Cleanup List");
1579     HRRSCleanupTask hrrs_cleanup_task;
1580     G1NoteEndOfConcMarkClosure g1_note_end(_g1h, &local_cleanup_list,
1581                                            &hrrs_cleanup_task);
1582     _g1h->heap_region_par_iterate(&g1_note_end, worker_id, &_hrclaimer);

1583 
1584     // Now update the lists
1585     _g1h->remove_from_old_sets(g1_note_end.old_regions_removed(), g1_note_end.humongous_regions_removed());
1586     {
1587       MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
1588       _g1h->decrement_summary_bytes(g1_note_end.freed_bytes());
1589 
1590       // If we iterate over the global cleanup list at the end of
1591       // cleanup to do this printing we will not guarantee to only
1592       // generate output for the newly-reclaimed regions (the list
1593       // might not be empty at the beginning of cleanup; we might
1594       // still be working on its previous contents). So we do the
1595       // printing here, before we append the new regions to the global
1596       // cleanup list.
1597 
1598       G1HRPrinter* hr_printer = _g1h->hr_printer();
1599       if (hr_printer->is_active()) {
1600         FreeRegionListIterator iter(&local_cleanup_list);
1601         while (iter.more_available()) {
1602           HeapRegion* hr = iter.get_next();


2411 }
2412 #endif // PRODUCT
2413 
2414 // Aggregate the counting data that was constructed concurrently
2415 // with marking.
2416 class AggregateCountDataHRClosure: public HeapRegionClosure {
2417   G1CollectedHeap* _g1h;
2418   G1ConcurrentMark* _cm;
2419   CardTableModRefBS* _ct_bs;
2420   BitMap* _cm_card_bm;
2421   uint _max_worker_id;
2422 
2423  public:
  // cm_card_bm is the global card bitmap that per-worker counting data
  // is aggregated into; max_worker_id bounds the worker ids whose
  // per-task card bitmaps are merged (presumably one bitmap per worker
  // — confirm against the aggregation loop).
  AggregateCountDataHRClosure(G1CollectedHeap* g1h,
                              BitMap* cm_card_bm,
                              uint max_worker_id) :
    _g1h(g1h), _cm(g1h->concurrent_mark()),
    _ct_bs(barrier_set_cast<CardTableModRefBS>(g1h->barrier_set())),
    _cm_card_bm(cm_card_bm), _max_worker_id(max_worker_id) { }
2430 
2431   void doHeapRegion(HeapRegion* hr) {
2432     HeapWord* start = hr->bottom();
2433     HeapWord* limit = hr->next_top_at_mark_start();
2434     HeapWord* end = hr->end();
2435 
2436     assert(start <= limit && limit <= hr->top() && hr->top() <= hr->end(),
2437            "Preconditions not met - "
2438            "start: " PTR_FORMAT ", limit: " PTR_FORMAT ", "
2439            "top: " PTR_FORMAT ", end: " PTR_FORMAT,
2440            p2i(start), p2i(limit), p2i(hr->top()), p2i(hr->end()));
2441 
2442     assert(hr->next_marked_bytes() == 0, "Precondition");
2443 
2444     if (start == limit) {
2445       // NTAMS of this region has not been set so nothing to do.
2446       return;
2447     }
2448 
2449     // 'start' should be in the heap.
2450     assert(_g1h->is_in_g1_reserved(start) && _ct_bs->is_card_aligned(start), "sanity");
2451     // 'end' *may* be just beyond the end of the heap (if hr is the last region)
2452     assert(!_g1h->is_in_g1_reserved(end) || _ct_bs->is_card_aligned(end), "sanity");
2453 
2454     BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
2455     BitMap::idx_t limit_idx = _cm->card_bitmap_index_for(limit);
2456     BitMap::idx_t end_idx = _cm->card_bitmap_index_for(end);
2457 
2458     // If ntams is not card aligned then we bump card bitmap index
2459     // for limit so that we get the all the cards spanned by
2460     // the object ending at ntams.
2461     // Note: if this is the last region in the heap then ntams
 2462     // could be actually just beyond the end of the heap;
2463     // limit_idx will then  correspond to a (non-existent) card
2464     // that is also outside the heap.
2465     if (_g1h->is_in_g1_reserved(limit) && !_ct_bs->is_card_aligned(limit)) {
2466       limit_idx += 1;


2484       // into the global card bitmap.
2485       BitMap::idx_t scan_idx = task_card_bm->get_next_one_offset(start_idx, limit_idx);
2486 
2487       while (scan_idx < limit_idx) {
2488         assert(task_card_bm->at(scan_idx) == true, "should be");
2489         _cm_card_bm->set_bit(scan_idx);
2490         assert(_cm_card_bm->at(scan_idx) == true, "should be");
2491 
2492         // BitMap::get_next_one_offset() can handle the case when
2493         // its left_offset parameter is greater than its right_offset
2494         // parameter. It does, however, have an early exit if
2495         // left_offset == right_offset. So let's limit the value
2496         // passed in for left offset here.
2497         BitMap::idx_t next_idx = MIN2(scan_idx + 1, limit_idx);
2498         scan_idx = task_card_bm->get_next_one_offset(next_idx, limit_idx);
2499       }
2500     }
2501 
2502     // Update the marked bytes for this region.
2503     hr->add_to_marked_bytes(marked_bytes);



2504   }
2505 };
2506 
2507 class G1AggregateCountDataTask: public AbstractGangTask {
2508 protected:
2509   G1CollectedHeap* _g1h;
2510   G1ConcurrentMark* _cm;
2511   BitMap* _cm_card_bm;
2512   uint _max_worker_id;
2513   uint _active_workers;
2514   HeapRegionClaimer _hrclaimer;
2515 
2516 public:
2517   G1AggregateCountDataTask(G1CollectedHeap* g1h,
2518                            G1ConcurrentMark* cm,
2519                            BitMap* cm_card_bm,
2520                            uint max_worker_id,
2521                            uint n_workers) :
2522       AbstractGangTask("Count Aggregation"),
2523       _g1h(g1h), _cm(cm), _cm_card_bm(cm_card_bm),


3592     *hum_bytes -= bytes;
3593   }
3594   return bytes;
3595 }
3596 
// It deduces the values for a region in a humongous region series
// from the _hum_* fields and updates those accordingly. It assumes
// that the _hum_* fields have already been set up from the "starts
// humongous" region and we visit the regions in address order.
void G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* used_bytes,
                                                     size_t* capacity_bytes,
                                                     size_t* prev_live_bytes,
                                                     size_t* next_live_bytes) {
  // Guard against being called before the "starts humongous" region
  // populated the accumulators.
  assert(_hum_used_bytes > 0 && _hum_capacity_bytes > 0, "pre-condition");
  // Each call to the single-argument overload drains (up to one
  // region's worth of) the corresponding accumulator.
  *used_bytes      = get_hum_bytes(&_hum_used_bytes);
  *capacity_bytes  = get_hum_bytes(&_hum_capacity_bytes);
  *prev_live_bytes = get_hum_bytes(&_hum_prev_live_bytes);
  *next_live_bytes = get_hum_bytes(&_hum_next_live_bytes);
}
3611 
3612 void G1PrintRegionLivenessInfoClosure::doHeapRegion(HeapRegion* r) {
3613   const char* type       = r->get_type_str();
3614   HeapWord* bottom       = r->bottom();
3615   HeapWord* end          = r->end();
3616   size_t capacity_bytes  = r->capacity();
3617   size_t used_bytes      = r->used();
3618   size_t prev_live_bytes = r->live_bytes();
3619   size_t next_live_bytes = r->next_live_bytes();
3620   double gc_eff          = r->gc_efficiency();
3621   size_t remset_bytes    = r->rem_set()->mem_size();
3622   size_t strong_code_roots_bytes = r->rem_set()->strong_code_roots_mem_size();
3623 
3624   if (r->is_starts_humongous()) {
3625     assert(_hum_used_bytes == 0 && _hum_capacity_bytes == 0 &&
3626            _hum_prev_live_bytes == 0 && _hum_next_live_bytes == 0,
3627            "they should have been zeroed after the last time we used them");
3628     // Set up the _hum_* fields.
3629     _hum_capacity_bytes  = capacity_bytes;
3630     _hum_used_bytes      = used_bytes;
3631     _hum_prev_live_bytes = prev_live_bytes;
3632     _hum_next_live_bytes = next_live_bytes;


3642   _total_used_bytes      += used_bytes;
3643   _total_capacity_bytes  += capacity_bytes;
3644   _total_prev_live_bytes += prev_live_bytes;
3645   _total_next_live_bytes += next_live_bytes;
3646   _total_remset_bytes    += remset_bytes;
3647   _total_strong_code_roots_bytes += strong_code_roots_bytes;
3648 
3649   // Print a line for this particular region.
3650   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
3651                           G1PPRL_TYPE_FORMAT
3652                           G1PPRL_ADDR_BASE_FORMAT
3653                           G1PPRL_BYTE_FORMAT
3654                           G1PPRL_BYTE_FORMAT
3655                           G1PPRL_BYTE_FORMAT
3656                           G1PPRL_DOUBLE_FORMAT
3657                           G1PPRL_BYTE_FORMAT
3658                           G1PPRL_BYTE_FORMAT,
3659                           type, p2i(bottom), p2i(end),
3660                           used_bytes, prev_live_bytes, next_live_bytes, gc_eff,
3661                           remset_bytes, strong_code_roots_bytes);


3662 }
3663 
3664 G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() {
3665   // add static memory usages to remembered set sizes
3666   _total_remset_bytes += HeapRegionRemSet::fl_mem_size() + HeapRegionRemSet::static_mem_size();
3667   // Print the footer of the output.
3668   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX);
3669   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
3670                          " SUMMARY"
3671                          G1PPRL_SUM_MB_FORMAT("capacity")
3672                          G1PPRL_SUM_MB_PERC_FORMAT("used")
3673                          G1PPRL_SUM_MB_PERC_FORMAT("prev-live")
3674                          G1PPRL_SUM_MB_PERC_FORMAT("next-live")
3675                          G1PPRL_SUM_MB_FORMAT("remset")
3676                          G1PPRL_SUM_MB_FORMAT("code-roots"),
3677                          bytes_to_mb(_total_capacity_bytes),
3678                          bytes_to_mb(_total_used_bytes),
3679                          perc(_total_used_bytes, _total_capacity_bytes),
3680                          bytes_to_mb(_total_prev_live_bytes),
3681                          perc(_total_prev_live_bytes, _total_capacity_bytes),
src/share/vm/gc/g1/g1ConcurrentMark.cpp
Index Unified diffs Context diffs Sdiffs Wdiffs Patch New Old Previous File Next File