
src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp

rev 7209 : 6979279


  67 
  68 static double cost_per_byte_ms_defaults[] = {
  69   0.00006, 0.00003, 0.00003, 0.000015, 0.000015, 0.00001, 0.00001, 0.000009
  70 };
  71 
  72 // these should be pretty consistent
  73 static double constant_other_time_ms_defaults[] = {
  74   5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0
  75 };
  76 
  77 
  78 static double young_other_cost_per_region_ms_defaults[] = {
  79   0.3, 0.2, 0.2, 0.15, 0.15, 0.12, 0.12, 0.1
  80 };
  81 
  82 static double non_young_other_cost_per_region_ms_defaults[] = {
  83   1.0, 0.7, 0.7, 0.5, 0.5, 0.42, 0.42, 0.30
  84 };
  85 
  86 G1CollectorPolicy::G1CollectorPolicy() :
  87   _parallel_gc_threads(G1CollectedHeap::use_parallel_gc_threads()
  88                         ? ParallelGCThreads : 1),
  89 
  90   _recent_gc_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  91   _stop_world_start(0.0),
  92 
  93   _concurrent_mark_remark_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  94   _concurrent_mark_cleanup_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  95 
  96   _alloc_rate_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  97   _prev_collection_pause_end_ms(0.0),
  98   _rs_length_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
  99   _cost_per_card_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
 100   _young_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
 101   _mixed_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
 102   _cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
 103   _mixed_cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
 104   _cost_per_byte_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
 105   _cost_per_byte_ms_during_cm_seq(new TruncatedSeq(TruncatedSeqLength)),
 106   _constant_other_time_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
 107   _young_other_cost_per_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
 108   _non_young_other_cost_per_region_ms_seq(
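
Further down the constructor (elided from this frame), each prediction sequence is seeded from the defaults tables above, indexed by the configured parallel GC thread count. A minimal standalone sketch of that indexing, under the assumption that each table has one entry per thread count from 1 to 8 with larger counts clamping to the last slot; seed_for_threads is an illustrative helper, not the HotSpot API:

    #include <algorithm>
    #include <cstddef>

    // Illustrative only: pick the default for 'threads' parallel GC threads.
    // Each *_defaults table above has eight entries; counts above 8 clamp
    // to the last entry (assumed from the surrounding constructor).
    static double seed_for_threads(const double* defaults, unsigned threads) {
      return defaults[std::min<std::size_t>(threads > 0 ? threads - 1 : 0, 7)];
    }

With 4 threads, for example, cost_per_byte_ms_defaults yields entry 3, i.e. 0.000015.
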


1527     } else {
1528       // The concurrent marking thread is still finishing up the
1529       // previous cycle. If we start one right now the two cycles
1530       // overlap. In particular, the concurrent marking thread might
1531       // be in the process of clearing the next marking bitmap (which
1532       // we will use for the next cycle if we start one). Starting a
1533       // cycle now will be bad given that parts of the marking
1534       // information might get cleared by the marking thread. And we
1535       // cannot wait for the marking thread to finish the cycle as it
1536       // periodically yields while clearing the next marking bitmap
1537       // and, if it's in a yield point, it's waiting for us to
1538       // finish. So, at this point we will not start a cycle and we'll
1539       // let the concurrent marking thread complete the last one.
1540       ergo_verbose0(ErgoConcCycles,
1541                     "do not initiate concurrent cycle",
1542                     ergo_format_reason("concurrent cycle already in progress"));
1543     }
1544   }
1545 }
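
The rule this comment encodes is small but easy to get wrong: the pause must neither start a cycle while the marker may still be clearing the next bitmap, nor block waiting for the marker, which is itself parked at a yield point waiting for the pause. A minimal sketch of that decision, with hypothetical names rather than the HotSpot code:

    // Hypothetical simplification of the guard above. Starting now could
    // lose marking state (the next bitmap may be only partially cleared);
    // waiting is a deadlock (the marker yields until the pause finishes).
    // The only safe choice is to decline and retry on a later pause.
    static bool should_initiate_conc_cycle(bool marker_finishing_prev_cycle) {
      return !marker_finishing_prev_cycle;
    }
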
1546 
1547 class KnownGarbageClosure: public HeapRegionClosure {
1548   G1CollectedHeap* _g1h;
1549   CollectionSetChooser* _hrSorted;
1550 
1551 public:
1552   KnownGarbageClosure(CollectionSetChooser* hrSorted) :
1553     _g1h(G1CollectedHeap::heap()), _hrSorted(hrSorted) { }
1554 
1555   bool doHeapRegion(HeapRegion* r) {
1556     // We only include humongous regions in collection
1557     // sets when concurrent mark shows that their contained object is
1558     // unreachable.
1559 
1560     // Do we have any marking information for this region?
1561     if (r->is_marked()) {
1562       // We will skip any region that's currently used as an old GC
1563       // alloc region (we should not consider those for collection
1564       // before we fill them up).
1565       if (_hrSorted->should_add(r) && !_g1h->is_old_gc_alloc_region(r)) {
1566         _hrSorted->add_region(r);
1567       }
1568     }
1569     return false;
1570   }
1571 };
1572 
1573 class ParKnownGarbageHRClosure: public HeapRegionClosure {
1574   G1CollectedHeap* _g1h;
1575   CSetChooserParUpdater _cset_updater;
1576 
1577 public:
1578   ParKnownGarbageHRClosure(CollectionSetChooser* hrSorted,
1579                            uint chunk_size) :
1580     _g1h(G1CollectedHeap::heap()),
1581     _cset_updater(hrSorted, true /* parallel */, chunk_size) { }
1582 
1583   bool doHeapRegion(HeapRegion* r) {
1584     // Do we have any marking information for this region?
1585     if (r->is_marked()) {
1586       // We will skip any region that's currently used as an old GC
1587       // alloc region (we should not consider those for collection
1588       // before we fill them up).
1589       if (_cset_updater.should_add(r) && !_g1h->is_old_gc_alloc_region(r)) {
1590         _cset_updater.add_region(r);
1591       }
1592     }


1600   G1CollectedHeap* _g1;
1601   HeapRegionClaimer _hrclaimer;
1602 
1603 public:
1604   ParKnownGarbageTask(CollectionSetChooser* hrSorted, uint chunk_size, uint n_workers) :
1605       AbstractGangTask("ParKnownGarbageTask"),
1606       _hrSorted(hrSorted), _chunk_size(chunk_size),
1607       _g1(G1CollectedHeap::heap()), _hrclaimer(n_workers) {}
1608 
1609   void work(uint worker_id) {
1610     ParKnownGarbageHRClosure parKnownGarbageCl(_hrSorted, _chunk_size);
1611     _g1->heap_region_par_iterate(&parKnownGarbageCl, worker_id, &_hrclaimer);
1612   }
1613 };
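
ParKnownGarbageTask spreads the region scan over the worker gang, each worker claiming chunk_size regions at a time until the region table is exhausted. A self-contained sketch of that claiming scheme, using std::thread and std::atomic as stand-ins for the gang and the HeapRegionClaimer (all names here are illustrative):

    #include <algorithm>
    #include <atomic>
    #include <thread>
    #include <vector>

    // Workers repeatedly claim the next unprocessed chunk of the region
    // index space; an atomic counter plays the role of the claimer.
    static void par_iterate(unsigned num_regions, unsigned chunk_size,
                            unsigned n_workers,
                            void (*process)(unsigned region_idx)) {
      std::atomic<unsigned> next(0);
      std::vector<std::thread> gang;
      for (unsigned w = 0; w < n_workers; w++) {
        gang.emplace_back([&] {
          for (;;) {
            unsigned start = next.fetch_add(chunk_size);
            if (start >= num_regions) break;   // all chunks claimed
            unsigned end = std::min(start + chunk_size, num_regions);
            for (unsigned i = start; i < end; i++) {
              process(i);                      // doHeapRegion(r) analogue
            }
          }
        });
      }
      for (std::thread& t : gang) t.join();
    }

Handing out chunks rather than fixed per-worker ranges is what makes the overpartitioning below useful: a worker that draws expensive regions simply claims fewer chunks, so it does not stall the rest of the gang.
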
1614 
1615 void
1616 G1CollectorPolicy::record_concurrent_mark_cleanup_end(int no_of_gc_threads) {
1617   _collectionSetChooser->clear();
1618 
1619   uint region_num = _g1->num_regions();
1620   if (G1CollectedHeap::use_parallel_gc_threads()) {
1621     const uint OverpartitionFactor = 4;
1622     uint WorkUnit;
1623     // The use of MinChunkSize = 8 in the original code
1624     // causes some assertion failures when the total number of
1625     // regions is less than 8.  The code here tries to fix that.
1626     // Should the original code also be fixed?
1627     if (no_of_gc_threads > 0) {
1628       const uint MinWorkUnit = MAX2(region_num / no_of_gc_threads, 1U);
1629       WorkUnit = MAX2(region_num / (no_of_gc_threads * OverpartitionFactor),
1630                       MinWorkUnit);
1631     } else {
1632       assert(no_of_gc_threads > 0,
1633         "The active gc workers should be greater than 0");
1634       // In a product build do something reasonable to avoid a crash.
1635       const uint MinWorkUnit = MAX2(region_num / (uint) ParallelGCThreads, 1U);
1636       WorkUnit =
1637         MAX2(region_num / (uint) (ParallelGCThreads * OverpartitionFactor),
1638              MinWorkUnit);
1639     }
1640     _collectionSetChooser->prepare_for_par_region_addition(_g1->num_regions(),
1641                                                            WorkUnit);
1642     ParKnownGarbageTask parKnownGarbageTask(_collectionSetChooser, WorkUnit, (uint) no_of_gc_threads);
1643     _g1->workers()->run_task(&parKnownGarbageTask);
1644   } else {
1645     KnownGarbageClosure knownGarbagecl(_collectionSetChooser);
1646     _g1->heap_region_iterate(&knownGarbagecl);
1647   }
1648 
1649   _collectionSetChooser->sort_regions();
1650 
1651   double end_sec = os::elapsedTime();
1652   double elapsed_time_ms = (end_sec - _mark_cleanup_start_sec) * 1000.0;
1653   _concurrent_mark_cleanup_times_ms->add(elapsed_time_ms);
1654   _cur_mark_stop_world_time_ms += elapsed_time_ms;
1655   _prev_collection_pause_end_ms += elapsed_time_ms;
1656   _mmu_tracker->add_pause(_mark_cleanup_start_sec, end_sec, true);
1657 }
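
The WorkUnit arithmetic above is easiest to check with concrete numbers; the following standalone helper mirrors the two MAX2 expressions for the no_of_gc_threads > 0 branch (illustrative, assumes threads > 0):

    #include <algorithm>

    // Mirror of the chunk-size computation above.
    static unsigned work_unit(unsigned region_num, unsigned threads) {
      const unsigned OverpartitionFactor = 4;
      unsigned min_work_unit = std::max(region_num / threads, 1U);
      return std::max(region_num / (threads * OverpartitionFactor),
                      min_work_unit);
    }
    // work_unit(2048, 8) == 256. Note that the first MAX2 operand,
    // regions / (threads * 4), can never exceed regions / threads, so
    // min_work_unit always dominates in this branch.
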
1658 
1659 // Add the heap region at the head of the non-incremental collection set
1660 void G1CollectorPolicy::add_old_region_to_cset(HeapRegion* hr) {
1661   assert(_inc_cset_build_state == Active, "Precondition");
1662   assert(hr->is_old(), "the region should be old");
1663 
1664   assert(!hr->in_collection_set(), "should not already be in the CSet");
1665   hr->set_in_collection_set(true);
1666   hr->set_next_in_collection_set(_collection_set);
1667   _collection_set = hr;




  67 
  68 static double cost_per_byte_ms_defaults[] = {
  69   0.00006, 0.00003, 0.00003, 0.000015, 0.000015, 0.00001, 0.00001, 0.000009
  70 };
  71 
  72 // these should be pretty consistent
  73 static double constant_other_time_ms_defaults[] = {
  74   5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0
  75 };
  76 
  77 
  78 static double young_other_cost_per_region_ms_defaults[] = {
  79   0.3, 0.2, 0.2, 0.15, 0.15, 0.12, 0.12, 0.1
  80 };
  81 
  82 static double non_young_other_cost_per_region_ms_defaults[] = {
  83   1.0, 0.7, 0.7, 0.5, 0.5, 0.42, 0.42, 0.30
  84 };
  85 
  86 G1CollectorPolicy::G1CollectorPolicy() :
  87   _parallel_gc_threads(ParallelGCThreads),
  88 
  89   _recent_gc_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  90   _stop_world_start(0.0),
  91 
  92   _concurrent_mark_remark_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  93   _concurrent_mark_cleanup_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  94 
  95   _alloc_rate_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  96   _prev_collection_pause_end_ms(0.0),
  97   _rs_length_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
  98   _cost_per_card_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  99   _young_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
 100   _mixed_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
 101   _cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
 102   _mixed_cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
 103   _cost_per_byte_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
 104   _cost_per_byte_ms_during_cm_seq(new TruncatedSeq(TruncatedSeqLength)),
 105   _constant_other_time_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
 106   _young_other_cost_per_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
 107   _non_young_other_cost_per_region_ms_seq(
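
Every field of type TruncatedSeq* above is a bounded sample history feeding the pause-time predictions. A rough standalone sketch of the idea, a fixed-capacity window with an average (the HotSpot TruncatedSeq additionally tracks variance and decaying averages used for prediction):

    #include <cstddef>
    #include <vector>

    // Sketch of a truncated sequence: keeps only the last 'length' samples.
    // 'length' must be positive.
    class BoundedSeq {
      std::vector<double> _buf;
      std::size_t _next;  // ring-buffer write position
      std::size_t _num;   // samples stored so far, at most _buf.size()
    public:
      explicit BoundedSeq(std::size_t length)
        : _buf(length, 0.0), _next(0), _num(0) {}

      void add(double v) {
        _buf[_next] = v;
        _next = (_next + 1) % _buf.size();
        if (_num < _buf.size()) _num++;
      }

      double avg() const {
        double sum = 0.0;
        for (std::size_t i = 0; i < _num; i++) sum += _buf[i];
        return _num == 0 ? 0.0 : sum / _num;
      }
    };
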


1526     } else {
1527       // The concurrent marking thread is still finishing up the
1528       // previous cycle. If we start one right now the two cycles
1529       // overlap. In particular, the concurrent marking thread might
1530       // be in the process of clearing the next marking bitmap (which
1531       // we will use for the next cycle if we start one). Starting a
1532       // cycle now will be bad given that parts of the marking
1533       // information might get cleared by the marking thread. And we
1534       // cannot wait for the marking thread to finish the cycle as it
1535       // periodically yields while clearing the next marking bitmap
1536       // and, if it's in a yield point, it's waiting for us to
1537       // finish. So, at this point we will not start a cycle and we'll
1538       // let the concurrent marking thread complete the last one.
1539       ergo_verbose0(ErgoConcCycles,
1540                     "do not initiate concurrent cycle",
1541                     ergo_format_reason("concurrent cycle already in progress"));
1542     }
1543   }
1544 }
1545 
1546 class ParKnownGarbageHRClosure: public HeapRegionClosure {
1547   G1CollectedHeap* _g1h;
1548   CSetChooserParUpdater _cset_updater;
1549 
1550 public:
1551   ParKnownGarbageHRClosure(CollectionSetChooser* hrSorted,
1552                            uint chunk_size) :
1553     _g1h(G1CollectedHeap::heap()),
1554     _cset_updater(hrSorted, true /* parallel */, chunk_size) { }
1555 
1556   bool doHeapRegion(HeapRegion* r) {
1557     // Do we have any marking information for this region?
1558     if (r->is_marked()) {
1559       // We will skip any region that's currently used as an old GC
1560       // alloc region (we should not consider those for collection
1561       // before we fill them up).
1562       if (_cset_updater.should_add(r) && !_g1h->is_old_gc_alloc_region(r)) {
1563         _cset_updater.add_region(r);
1564       }
1565     }


1573   G1CollectedHeap* _g1;
1574   HeapRegionClaimer _hrclaimer;
1575 
1576 public:
1577   ParKnownGarbageTask(CollectionSetChooser* hrSorted, uint chunk_size, uint n_workers) :
1578       AbstractGangTask("ParKnownGarbageTask"),
1579       _hrSorted(hrSorted), _chunk_size(chunk_size),
1580       _g1(G1CollectedHeap::heap()), _hrclaimer(n_workers) {}
1581 
1582   void work(uint worker_id) {
1583     ParKnownGarbageHRClosure parKnownGarbageCl(_hrSorted, _chunk_size);
1584     _g1->heap_region_par_iterate(&parKnownGarbageCl, worker_id, &_hrclaimer);
1585   }
1586 };
1587 
1588 void
1589 G1CollectorPolicy::record_concurrent_mark_cleanup_end(int no_of_gc_threads) {
1590   _collectionSetChooser->clear();
1591 
1592   uint region_num = _g1->num_regions();

1593   const uint OverpartitionFactor = 4;
1594   uint WorkUnit;
1595   // The use of MinChunkSize = 8 in the original code
1596   // causes some assertion failures when the total number of
1597   // regions is less than 8.  The code here tries to fix that.
1598   // Should the original code also be fixed?
1599   if (no_of_gc_threads > 0) {
1600     const uint MinWorkUnit = MAX2(region_num / no_of_gc_threads, 1U);
1601     WorkUnit = MAX2(region_num / (no_of_gc_threads * OverpartitionFactor),
1602                     MinWorkUnit);
1603   } else {
1604     assert(no_of_gc_threads > 0,
1605       "The active gc workers should be greater than 0");
1606     // In a product build do something reasonable to avoid a crash.
1607     const uint MinWorkUnit = MAX2(region_num / (uint) ParallelGCThreads, 1U);
1608     WorkUnit =
1609       MAX2(region_num / (uint) (ParallelGCThreads * OverpartitionFactor),
1610            MinWorkUnit);
1611   }
1612   _collectionSetChooser->prepare_for_par_region_addition(_g1->num_regions(),
1613                                                          WorkUnit);
1614   ParKnownGarbageTask parKnownGarbageTask(_collectionSetChooser, WorkUnit, (uint) no_of_gc_threads);
1615   _g1->workers()->run_task(&parKnownGarbageTask);
1616 
1617   _collectionSetChooser->sort_regions();
1618 
1619   double end_sec = os::elapsedTime();
1620   double elapsed_time_ms = (end_sec - _mark_cleanup_start_sec) * 1000.0;
1621   _concurrent_mark_cleanup_times_ms->add(elapsed_time_ms);
1622   _cur_mark_stop_world_time_ms += elapsed_time_ms;
1623   _prev_collection_pause_end_ms += elapsed_time_ms;
1624   _mmu_tracker->add_pause(_mark_cleanup_start_sec, end_sec, true);
1625 }
1626 
1627 // Add the heap region at the head of the non-incremental collection set
1628 void G1CollectorPolicy::add_old_region_to_cset(HeapRegion* hr) {
1629   assert(_inc_cset_build_state == Active, "Precondition");
1630   assert(hr->is_old(), "the region should be old");
1631 
1632   assert(!hr->in_collection_set(), "should not already be in the CSet");
1633   hr->set_in_collection_set(true);
1634   hr->set_next_in_collection_set(_collection_set);
1635   _collection_set = hr;

