src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp

rev 5732 : [mq]: comments2


1058       }
1059     } else {
1060       ergo_verbose0(ErgoMixedGCs,
1061                     "do not start mixed GCs",
1062                     ergo_format_reason("concurrent cycle is about to start"));
1063     }
1064     _last_young_gc = false;
1065   }
1066 
1067   if (!_last_gc_was_young) {
1068     // This is a mixed GC. Here we decide whether to continue doing
1069     // mixed GCs or not.
1070 
1071     if (!next_gc_should_be_mixed("continue mixed GCs",
1072                                  "do not continue mixed GCs")) {
1073       set_gcs_are_young(true);
1074     }
1075   }
1076 
1077   _short_lived_surv_rate_group->start_adding_regions();
- 1078   // do that for any other surv rate groupsx
+ 1078   // Do that for any other surv rate groups
1079 
1080   if (update_stats) {
1081     double cost_per_card_ms = 0.0;
1082     if (_pending_cards > 0) {
1083       cost_per_card_ms = phase_times()->average_last_update_rs_time() / (double) _pending_cards;
1084       _cost_per_card_ms_seq->add(cost_per_card_ms);
1085     }
1086 
1087     size_t cards_scanned = _g1->cards_scanned();
1088 
1089     double cost_per_entry_ms = 0.0;
1090     if (cards_scanned > 10) {
1091       cost_per_entry_ms = phase_times()->average_last_scan_rs_time() / (double) cards_scanned;
1092       if (_last_gc_was_young) {
1093         _cost_per_entry_ms_seq->add(cost_per_entry_ms);
1094       } else {
1095         _mixed_cost_per_entry_ms_seq->add(cost_per_entry_ms);
1096       }
1097     }
1098 
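The update_stats block above derives per-unit costs from the pause that just ended: the measured RS update time is divided by _pending_cards and the RS scan time by cards_scanned, and the results are fed into averaging sequences (kept separately for young and mixed collections) that later pauses are sized from. A minimal sketch, with hypothetical names (CostSeq and predict_rs_pause_component_ms are illustrative, not HotSpot's API), of how such per-unit costs can be turned back into a pause-time prediction:

#include <cstddef>

// Stand-in for the averaging sequences the policy feeds (_cost_per_card_ms_seq
// and friends); keeps a running average of observed per-unit costs.
struct CostSeq {
  double sum = 0.0;
  size_t n   = 0;
  void add(double v) { sum += v; n++; }
  double avg() const { return n == 0 ? 0.0 : sum / (double) n; }
};

// Predicts the remembered-set component of the next pause by multiplying the
// expected amount of work by the averaged per-unit costs observed so far.
double predict_rs_pause_component_ms(const CostSeq& cost_per_card_ms,
                                     const CostSeq& cost_per_entry_ms,
                                     size_t pending_cards,
                                     size_t expected_cards_scanned) {
  return (double) pending_cards * cost_per_card_ms.avg() +
         (double) expected_cards_scanned * cost_per_entry_ms.avg();
}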


1724 
1725 void G1CollectorPolicy::add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length) {
1726   // This routine is used when:
1727   // * adding survivor regions to the incremental cset at the end of an
1728   //   evacuation pause,
1729   // * adding the current allocation region to the incremental cset
1730   //   when it is retired, and
1731   // * updating existing policy information for a region in the
1732   //   incremental cset via young list RSet sampling.
1733   // Therefore this routine may be called at a safepoint by the
1734   // VM thread, or in-between safepoints by mutator threads (when
1735   // retiring the current allocation region) or a concurrent
1736   // refine thread (RSet sampling).
1737 
1738   double region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, gcs_are_young());
1739   size_t used_bytes = hr->used();
1740   _inc_cset_recorded_rs_lengths += rs_length;
1741   _inc_cset_predicted_elapsed_time_ms += region_elapsed_time_ms;
1742   _inc_cset_bytes_used_before += used_bytes;
1743 
- 1744   // Cache the values we have added to the aggregated informtion
+ 1744   // Cache the values we have added to the aggregated information
1745   // in the heap region in case we have to remove this region from
1746   // the incremental collection set, or it is updated by the
1747   // rset sampling code
1748   hr->set_recorded_rs_length(rs_length);
1749   hr->set_predicted_elapsed_time_ms(region_elapsed_time_ms);
1750 }
1751 
1752 void G1CollectorPolicy::update_incremental_cset_info(HeapRegion* hr,
1753                                                      size_t new_rs_length) {
1754   // Update the CSet information that is dependent on the new RS length
1755   assert(hr->is_young(), "Precondition");
1756   assert(!SafepointSynchronize::is_at_safepoint(),
1757                                                "should not be at a safepoint");
1758 
1759   // We could have updated _inc_cset_recorded_rs_lengths and
1760   // _inc_cset_predicted_elapsed_time_ms directly but we'd need to do
1761   // that atomically, as this code is executed by a concurrent
1762   // refinement thread, potentially concurrently with a mutator thread
1763   // allocating a new region and also updating the same fields. To
1764   // avoid the atomic operations we accumulate these updates on two
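add_to_incremental_cset_info() folds a region's predicted cost into the incremental collection set totals and caches the exact values it contributed on the region itself; update_incremental_cset_info() is the RSet-sampling path that later has to correct those totals, and the comment cut off at line 1764 notes that the corrections are accumulated separately rather than applied to the shared fields atomically. A simplified sketch of the cache-then-correct idea the cached per-region values enable (Region and IncCSetTotals are hypothetical illustration types, not the HotSpot classes; the real code defers the correction instead of touching the totals from the refinement thread):

#include <cstddef>

// Each region remembers what it contributed to the aggregate, so a later
// update can replace just that contribution instead of rebuilding the totals.
struct Region {
  size_t recorded_rs_length = 0;
  double predicted_time_ms  = 0.0;
};

struct IncCSetTotals {
  size_t rs_lengths        = 0;
  double predicted_time_ms = 0.0;

  void add(Region* r, size_t rs_length, double time_ms) {
    rs_lengths        += rs_length;
    predicted_time_ms += time_ms;
    r->recorded_rs_length = rs_length;   // cache what was added
    r->predicted_time_ms  = time_ms;
  }

  // A new RS length was sampled for an already-added region: remove the
  // cached contribution, then add the recomputed one.
  void update(Region* r, size_t new_rs_length, double new_time_ms) {
    rs_lengths        -= r->recorded_rs_length;
    rs_lengths        += new_rs_length;
    predicted_time_ms -= r->predicted_time_ms;
    predicted_time_ms += new_time_ms;
    r->recorded_rs_length = new_rs_length;
    r->predicted_time_ms  = new_time_ms;
  }
};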



