
src/share/vm/gc/g1/g1CollectorPolicy.cpp

rev 9277 : imported patch 8140597-forcing-initial-mark-causes-abort-mixed-collections

Old version:
 913 }
 914 
 915 double G1CollectorPolicy::non_young_other_time_ms() const {
 916   return phase_times()->non_young_cset_choice_time_ms() +
 917          phase_times()->non_young_free_cset_time_ms();
 918 
 919 }
 920 
 921 double G1CollectorPolicy::other_time_ms(double pause_time_ms) const {
 922   return pause_time_ms -
 923          average_time_ms(G1GCPhaseTimes::UpdateRS) -
 924          average_time_ms(G1GCPhaseTimes::ScanRS) -
 925          average_time_ms(G1GCPhaseTimes::ObjCopy) -
 926          average_time_ms(G1GCPhaseTimes::Termination);
 927 }
 928 
 929 double G1CollectorPolicy::constant_other_time_ms(double pause_time_ms) const {
 930   return other_time_ms(pause_time_ms) - young_other_time_ms() - non_young_other_time_ms();
 931 }
 932 
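A quick note on what these accessors compute. As a hedged illustration (all names below are hypothetical, not HotSpot's), the decomposition is: everything in the pause that is not remembered-set update/scan, object copy, or termination counts as "other" time, and the young/non-young collection-set choice and free-cset costs are then subtracted to leave the constant overhead.

  // Illustrative sketch of the decomposition done by the accessors above.
  struct PauseBreakdown {
    double pause_ms;
    double update_rs_ms, scan_rs_ms, obj_copy_ms, termination_ms;
    double young_other_ms, non_young_other_ms;

    // Time not attributed to the four tracked phases.
    double other_time_ms() const {
      return pause_ms - update_rs_ms - scan_rs_ms - obj_copy_ms - termination_ms;
    }
    // Overhead left after the per-generation "other" costs are removed.
    double constant_other_time_ms() const {
      return other_time_ms() - young_other_ms - non_young_other_ms;
    }
  };

For example, a 50 ms pause with 10 + 8 + 20 + 2 ms in the tracked phases leaves 10 ms of "other" time; 3 ms of young and 2 ms of non-young overhead then leave 5 ms of constant other time.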




 933 bool G1CollectorPolicy::need_to_start_conc_mark(const char* source, size_t alloc_word_size) {
 934   if (_g1->concurrent_mark()->cmThread()->during_cycle()) {
 935     return false;
 936   }
 937 
 938   size_t marking_initiating_used_threshold =
 939     (_g1->capacity() / 100) * InitiatingHeapOccupancyPercent;
 940   size_t cur_used_bytes = _g1->non_young_capacity_bytes();
 941   size_t alloc_byte_size = alloc_word_size * HeapWordSize;
 942 
 943   if ((cur_used_bytes + alloc_byte_size) > marking_initiating_used_threshold) {
 944     if (collector_state()->gcs_are_young() && !collector_state()->last_young_gc()) {
 945       ergo_verbose5(ErgoConcCycles,
 946         "request concurrent cycle initiation",
 947         ergo_format_reason("occupancy higher than threshold")
 948         ergo_format_byte("occupancy")
 949         ergo_format_byte("allocation request")
 950         ergo_format_byte_perc("threshold")
 951         ergo_format_str("source"),
 952         cur_used_bytes,
 953         alloc_byte_size,
 954         marking_initiating_used_threshold,


1054       // CR 6902692 by redoing the manner in which the ratio is incrementally computed.
1055       if (_recent_avg_pause_time_ratio < 0.0) {
1056         _recent_avg_pause_time_ratio = 0.0;
1057       } else {
1058         assert(_recent_avg_pause_time_ratio - 1.0 > 0.0, "Ctl-point invariant");
1059         _recent_avg_pause_time_ratio = 1.0;
1060       }
1061     }
1062   }
1063 
1064   bool new_in_marking_window = collector_state()->in_marking_window();
1065   bool new_in_marking_window_im = false;
1066   if (last_pause_included_initial_mark) {
1067     new_in_marking_window = true;
1068     new_in_marking_window_im = true;
1069   }
1070 
1071   if (collector_state()->last_young_gc()) {
1072   // This is supposed to be the "last young GC" before we start
1073     // doing mixed GCs. Here we decide whether to start mixed GCs or not.

1074 
1075     if (!last_pause_included_initial_mark) {
1076       if (next_gc_should_be_mixed("start mixed GCs",
1077                                   "do not start mixed GCs")) {
1078         collector_state()->set_gcs_are_young(false);
1079       }
1080     } else {
1081       ergo_verbose0(ErgoMixedGCs,
1082                     "do not start mixed GCs",
1083                     ergo_format_reason("concurrent cycle is about to start"));
1084     }
1085     collector_state()->set_last_young_gc(false);
1086   }
1087 
1088   if (!collector_state()->last_gc_was_young()) {
1089     // This is a mixed GC. Here we decide whether to continue doing
1090     // mixed GCs or not.
1091 
1092     if (!next_gc_should_be_mixed("continue mixed GCs",
1093                                  "do not continue mixed GCs")) {
1094       collector_state()->set_gcs_are_young(true);
1095     }
1096   }
1097 
1098   _short_lived_surv_rate_group->start_adding_regions();
1099   // Do that for any other surv rate groups
1100 
1101   if (update_stats) {
1102     double cost_per_card_ms = 0.0;
1103     double cost_scan_hcc = average_time_ms(G1GCPhaseTimes::ScanHCC);
1104     if (_pending_cards > 0) {


1582     expansion_region_num = (uint) ceil(expansion_region_num_d);
1583   } else {
1584     assert(expansion_region_num == 0, "sanity");
1585   }
1586   _young_list_max_length = _young_list_target_length + expansion_region_num;
1587   assert(_young_list_target_length <= _young_list_max_length, "post-condition");
1588 }
1589 
1590 // Calculates survivor space parameters.
1591 void G1CollectorPolicy::update_survivors_policy() {
1592   double max_survivor_regions_d =
1593                  (double) _young_list_target_length / (double) SurvivorRatio;
1594   // We use ceiling so that if max_survivor_regions_d is > 0.0 (but
1595   // smaller than 1.0) we'll get 1.
1596   _max_survivor_regions = (uint) ceil(max_survivor_regions_d);
1597 
1598   _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold(
1599         HeapRegion::GrainWords * _max_survivor_regions, counters());
1600 }
1601 
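A worked example of the ceiling logic above, assuming the HotSpot default SurvivorRatio of 8 (the target length here is illustrative): a young list target of 12 regions gives 12 / 8 = 1.5, which ceil rounds to 2 survivor regions, and the capacity handed to compute_tenuring_threshold is then 2 regions' worth of words (HeapRegion::GrainWords * 2).

  #include <cmath>
  #include <cstdint>

  // Illustrative only: mirrors the ceiling above, so any positive
  // fractional result yields at least one survivor region.
  uint32_t max_survivor_regions(uint32_t young_target_len,
                                uint32_t survivor_ratio) {
    double d = (double) young_target_len / (double) survivor_ratio;
    return (uint32_t) std::ceil(d);  // 12 / 8 = 1.5 -> 2; 1 / 8 = 0.125 -> 1
  }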
1602 bool G1CollectorPolicy::force_initial_mark_if_outside_cycle(
1603                                                      GCCause::Cause gc_cause) {


1604   bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
1605   if (!during_cycle) {
1606     ergo_verbose1(ErgoConcCycles,
1607                   "request concurrent cycle initiation",
1608                   ergo_format_reason("requested by GC cause")
1609                   ergo_format_str("GC cause"),
1610                   GCCause::to_string(gc_cause));
1611     collector_state()->set_initiate_conc_mark_if_possible(true);
1612     return true;
1613   } else {
1614     ergo_verbose1(ErgoConcCycles,
1615                   "do not request concurrent cycle initiation",
1616                   ergo_format_reason("concurrent cycle already in progress")
1617                   ergo_format_str("GC cause"),
1618                   GCCause::to_string(gc_cause));
1619     return false;
1620   }
1621 }
1622 
1623 void
1624 G1CollectorPolicy::decide_on_conc_mark_initiation() {
1625   // We are about to decide on whether this pause will be an
1626   // initial-mark pause.
1627 
1628   // First, collector_state()->during_initial_mark_pause() should not already be set. We
1629   // will set it here if we have to. However, it should be cleared by
1630   // the end of the pause (it's only set for the duration of an
1631   // initial-mark pause).
1632   assert(!collector_state()->during_initial_mark_pause(), "pre-condition");
1633 
1634   if (collector_state()->initiate_conc_mark_if_possible()) {
1635     // We had noticed on a previous pause that the heap occupancy has
1636     // gone over the initiating threshold and we should start a
1637     // concurrent marking cycle. So we might initiate one.
1638 
1639     bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
1640     if (!during_cycle) {
1641       // The concurrent marking thread is not "during a cycle", i.e.,
1642       // it has completed the last one. So we can go ahead and
1643       // initiate a new cycle.
1644 
1645       collector_state()->set_during_initial_mark_pause(true);
1646       // We do not allow mixed GCs during marking.
1647       if (!collector_state()->gcs_are_young()) {
1648         collector_state()->set_gcs_are_young(true);
1649         ergo_verbose0(ErgoMixedGCs,
1650                       "end mixed GCs",
1651                       ergo_format_reason("concurrent cycle is about to start"));
1652       }
1653 
1654       // And we can now clear initiate_conc_mark_if_possible() as
1655       // we've already acted on it.
1656       collector_state()->set_initiate_conc_mark_if_possible(false);
1657 
1658       ergo_verbose0(ErgoConcCycles,
1659                   "initiate concurrent cycle",
1660                   ergo_format_reason("concurrent cycle initiation requested"));
1661     } else {
1662       // The concurrent marking thread is still finishing up the
1663       // previous cycle. If we start one right now the two cycles
1664       // overlap. In particular, the concurrent marking thread might
1665       // be in the process of clearing the next marking bitmap (which
1666       // we will use for the next cycle if we start one). Starting a
1667       // cycle now will be bad given that parts of the marking
1668       // information might get cleared by the marking thread. And we
1669       // cannot wait for the marking thread to finish the cycle as it
1670       // periodically yields while clearing the next marking bitmap
1671       // and, if it's in a yield point, it's waiting for us to
1672       // finish. So, at this point we will not start a cycle and we'll
1673   // let the concurrent marking thread complete the last one.

New version:
 913 }
 914 
 915 double G1CollectorPolicy::non_young_other_time_ms() const {
 916   return phase_times()->non_young_cset_choice_time_ms() +
 917          phase_times()->non_young_free_cset_time_ms();
 918 
 919 }
 920 
 921 double G1CollectorPolicy::other_time_ms(double pause_time_ms) const {
 922   return pause_time_ms -
 923          average_time_ms(G1GCPhaseTimes::UpdateRS) -
 924          average_time_ms(G1GCPhaseTimes::ScanRS) -
 925          average_time_ms(G1GCPhaseTimes::ObjCopy) -
 926          average_time_ms(G1GCPhaseTimes::Termination);
 927 }
 928 
 929 double G1CollectorPolicy::constant_other_time_ms(double pause_time_ms) const {
 930   return other_time_ms(pause_time_ms) - young_other_time_ms() - non_young_other_time_ms();
 931 }
 932 
 933 bool G1CollectorPolicy::about_to_start_mixed_phase() const {
 934   return _g1->concurrent_mark()->cmThread()->during_cycle() || collector_state()->last_young_gc();
 935 }
 936 
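The new helper is the heart of the patch: it folds two consecutive stretches of the cycle into one "about to start mixed" notion. A sketch of how the two flags line up (field names here are hypothetical, not HotSpot's):

  // Illustrative: the predicate is true both while the concurrent mark
  // thread is running and during the single "last young" GC that hands
  // over to the mixed phase.
  struct PhaseFlags {
    bool concurrent_mark_running;  // cmThread()->during_cycle()
    bool last_young_gc;            // young GC right after marking, before mixed GCs
    bool about_to_start_mixed() const {
      return concurrent_mark_running || last_young_gc;
    }
  };

This is what lets need_to_start_conc_mark() bail out for the whole marking-to-mixed handover, not just while the marker thread is alive, as the old version did.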
 937 bool G1CollectorPolicy::need_to_start_conc_mark(const char* source, size_t alloc_word_size) {
 938   if (about_to_start_mixed_phase()) {
 939     return false;
 940   }
 941 
 942   size_t marking_initiating_used_threshold =
 943     (_g1->capacity() / 100) * InitiatingHeapOccupancyPercent;
 944   size_t cur_used_bytes = _g1->non_young_capacity_bytes();
 945   size_t alloc_byte_size = alloc_word_size * HeapWordSize;
 946 
 947   if ((cur_used_bytes + alloc_byte_size) > marking_initiating_used_threshold) {
 948     if (collector_state()->gcs_are_young() && !collector_state()->last_young_gc()) {
 949       ergo_verbose5(ErgoConcCycles,
 950         "request concurrent cycle initiation",
 951         ergo_format_reason("occupancy higher than threshold")
 952         ergo_format_byte("occupancy")
 953         ergo_format_byte("allocation request")
 954         ergo_format_byte_perc("threshold")
 955         ergo_format_str("source"),
 956         cur_used_bytes,
 957         alloc_byte_size,
 958         marking_initiating_used_threshold,
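The initiation threshold above is a straight percentage of committed capacity. As a worked example with the JVM default InitiatingHeapOccupancyPercent of 45 (the heap size is illustrative):

  #include <cstddef>

  // Illustrative: the same integer arithmetic as in need_to_start_conc_mark().
  size_t marking_threshold(size_t capacity_bytes, size_t ihop_percent) {
    return (capacity_bytes / 100) * ihop_percent;
  }
  // marking_threshold(1073741824, 45) == 483183810 bytes, about 461 MiB on a
  // 1 GiB heap: a concurrent cycle is requested once non-young occupancy
  // plus the pending allocation exceeds this value.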


1058       // CR 6902692 by redoing the manner in which the ratio is incrementally computed.
1059       if (_recent_avg_pause_time_ratio < 0.0) {
1060         _recent_avg_pause_time_ratio = 0.0;
1061       } else {
1062         assert(_recent_avg_pause_time_ratio - 1.0 > 0.0, "Ctl-point invariant");
1063         _recent_avg_pause_time_ratio = 1.0;
1064       }
1065     }
1066   }
1067 
1068   bool new_in_marking_window = collector_state()->in_marking_window();
1069   bool new_in_marking_window_im = false;
1070   if (last_pause_included_initial_mark) {
1071     new_in_marking_window = true;
1072     new_in_marking_window_im = true;
1073   }
1074 
1075   if (collector_state()->last_young_gc()) {
1076   // This is supposed to be the "last young GC" before we start
1077     // doing mixed GCs. Here we decide whether to start mixed GCs or not.
1078     assert(!last_pause_included_initial_mark, "The last young GC is not allowed to be an initial mark GC");
1079 

1080     if (next_gc_should_be_mixed("start mixed GCs",
1081                                 "do not start mixed GCs")) {
1082       collector_state()->set_gcs_are_young(false);
1083     }
1084 




1085     collector_state()->set_last_young_gc(false);
1086   }
1087 
1088   if (!collector_state()->last_gc_was_young()) {
1089     // This is a mixed GC. Here we decide whether to continue doing
1090     // mixed GCs or not.
1091 
1092     if (!next_gc_should_be_mixed("continue mixed GCs",
1093                                  "do not continue mixed GCs")) {
1094       collector_state()->set_gcs_are_young(true);
1095     }
1096   }
1097 
1098   _short_lived_surv_rate_group->start_adding_regions();
1099   // Do that for any other surv rate groups
1100 
1101   if (update_stats) {
1102     double cost_per_card_ms = 0.0;
1103     double cost_scan_hcc = average_time_ms(G1GCPhaseTimes::ScanHCC);
1104     if (_pending_cards > 0) {


1582     expansion_region_num = (uint) ceil(expansion_region_num_d);
1583   } else {
1584     assert(expansion_region_num == 0, "sanity");
1585   }
1586   _young_list_max_length = _young_list_target_length + expansion_region_num;
1587   assert(_young_list_target_length <= _young_list_max_length, "post-condition");
1588 }
1589 
1590 // Calculates survivor space parameters.
1591 void G1CollectorPolicy::update_survivors_policy() {
1592   double max_survivor_regions_d =
1593                  (double) _young_list_target_length / (double) SurvivorRatio;
1594   // We use ceiling so that if max_survivor_regions_d is > 0.0 (but
1595   // smaller than 1.0) we'll get 1.
1596   _max_survivor_regions = (uint) ceil(max_survivor_regions_d);
1597 
1598   _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold(
1599         HeapRegion::GrainWords * _max_survivor_regions, counters());
1600 }
1601 
1602 bool G1CollectorPolicy::force_initial_mark_if_outside_cycle(GCCause::Cause gc_cause) {
1603   // We actually check whether we are marking here, not whether we are in a
1604   // reclamation phase. This means that we will schedule a concurrent mark
1605   // even while we are still in the process of reclaiming memory.
1606   bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
1607   if (!during_cycle) {
1608     ergo_verbose1(ErgoConcCycles,
1609                   "request concurrent cycle initiation",
1610                   ergo_format_reason("requested by GC cause")
1611                   ergo_format_str("GC cause"),
1612                   GCCause::to_string(gc_cause));
1613     collector_state()->set_initiate_conc_mark_if_possible(true);
1614     return true;
1615   } else {
1616     ergo_verbose1(ErgoConcCycles,
1617                   "do not request concurrent cycle initiation",
1618                   ergo_format_reason("concurrent cycle already in progress")
1619                   ergo_format_str("GC cause"),
1620                   GCCause::to_string(gc_cause));
1621     return false;
1622   }
1623 }
1624 
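For context, the return value above tells the caller whether the request was accepted. A hedged sketch of the calling pattern (the helper name is hypothetical; the real call site is in G1CollectedHeap, on the path for GC causes that should trigger a concurrent cycle rather than a full GC):

  if (policy->force_initial_mark_if_outside_cycle(gc_cause)) {
    // Accepted: the next young pause may become an initial-mark pause.
    schedule_initial_mark_pause();  // hypothetical helper
  } else {
    // A concurrent cycle is already running; let it finish.
  }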
1625 void G1CollectorPolicy::decide_on_conc_mark_initiation() {

1626   // We are about to decide on whether this pause will be an
1627   // initial-mark pause.
1628 
1629   // First, collector_state()->during_initial_mark_pause() should not already be set. We
1630   // will set it here if we have to. However, it should be cleared by
1631   // the end of the pause (it's only set for the duration of an
1632   // initial-mark pause).
1633   assert(!collector_state()->during_initial_mark_pause(), "pre-condition");
1634 
1635   if (collector_state()->initiate_conc_mark_if_possible()) {
1636     // We had noticed on a previous pause that the heap occupancy has
1637     // gone over the initiating threshold and we should start a
1638     // concurrent marking cycle. So we might initiate one.
1639 
1640     if (!about_to_start_mixed_phase() && collector_state()->gcs_are_young()) {
1641       // Initiate a new initial mark only if there is no marking or reclamation going
1642       // on.


1643 
1644       collector_state()->set_during_initial_mark_pause(true);








1645       // And we can now clear initiate_conc_mark_if_possible() as
1646       // we've already acted on it.
1647       collector_state()->set_initiate_conc_mark_if_possible(false);
1648 
1649       ergo_verbose0(ErgoConcCycles,
1650                   "initiate concurrent cycle",
1651                   ergo_format_reason("concurrent cycle initiation requested"));
1652     } else {
1653       // The concurrent marking thread is still finishing up the
1654       // previous cycle. If we start one right now the two cycles
1655       // overlap. In particular, the concurrent marking thread might
1656       // be in the process of clearing the next marking bitmap (which
1657       // we will use for the next cycle if we start one). Starting a
1658       // cycle now will be bad given that parts of the marking
1659       // information might get cleared by the marking thread. And we
1660       // cannot wait for the marking thread to finish the cycle as it
1661       // periodically yields while clearing the next marking bitmap
1662       // and, if it's in a yield point, it's waiting for us to
1663       // finish. So, at this point we will not start a cycle and we'll
1664       // let the concurrent marking thread complete the last one.
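Taken together, the patch replaces the old "not currently marking" check with a stricter gate. A compact, hypothetical condensation of the decision now enforced by decide_on_conc_mark_initiation() (parameter names are illustrative, not HotSpot's):

  bool may_start_initial_mark(bool initiate_requested,
                              bool concurrent_mark_running,
                              bool last_young_gc,
                              bool gcs_are_young) {
    if (!initiate_requested) return false;
    bool about_to_start_mixed = concurrent_mark_running || last_young_gc;
    // Only start from the young-only phase; never while marking or the
    // marking-to-mixed handover is in flight. This is what keeps a forced
    // initial mark from aborting the mixed collections (JDK-8140597).
    return !about_to_start_mixed && gcs_are_young;
  }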

