src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp

Print this page
rev 4773 : 8005849: JEP 167: Event-Based JVM Tracing
Reviewed-by: acorn, coleenp, sla
Contributed-by: Karen Kinnear <karen.kinnear@oracle.com>, Bengt Rutisson <bengt.rutisson@oracle.com>, Calvin Cheung <calvin.cheung@oracle.com>, Erik Gahlin <erik.gahlin@oracle.com>, Erik Helin <erik.helin@oracle.com>, Jesper Wilhelmsson <jesper.wilhelmsson@oracle.com>, Keith McGuigan <keith.mcguigan@oracle.com>, Mattias Tobiasson <mattias.tobiasson@oracle.com>, Markus Gronlund <markus.gronlund@oracle.com>, Mikael Auno <mikael.auno@oracle.com>, Nils Eliasson <nils.eliasson@oracle.com>, Nils Loodin <nils.loodin@oracle.com>, Rickard Backman <rickard.backman@oracle.com>, Staffan Larsen <staffan.larsen@oracle.com>, Stefan Karlsson <stefan.karlsson@oracle.com>, Yekaterina Kantserova <yekaterina.kantserova@oracle.com>


 892         "do not request concurrent cycle initiation",
 893         ergo_format_reason("still doing mixed collections")
 894         ergo_format_byte("occupancy")
 895         ergo_format_byte("allocation request")
 896         ergo_format_byte_perc("threshold")
 897         ergo_format_str("source"),
 898         cur_used_bytes,
 899         alloc_byte_size,
 900         marking_initiating_used_threshold,
 901         (double) InitiatingHeapOccupancyPercent,
 902         source);
 903     }
 904   }
 905 
 906   return false;
 907 }
 908 
 909 // Any timer value below this threshold is considered to be zero
 910 #define MIN_TIMER_GRANULARITY 0.0000001
 911 
 912 void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms) {
 913   double end_time_sec = os::elapsedTime();
 914   assert(_cur_collection_pause_used_regions_at_start >= cset_region_length(),
 915          "otherwise, the subtraction below does not make sense");
 916   size_t rs_size =
 917             _cur_collection_pause_used_regions_at_start - cset_region_length();
 918   size_t cur_used_bytes = _g1->used();
 919   assert(cur_used_bytes == _g1->recalculate_used(), "It should!");
 920   bool last_pause_included_initial_mark = false;
 921   bool update_stats = !_g1->evacuation_failed();
 922 
 923 #ifndef PRODUCT
 924   if (G1YoungSurvRateVerbose) {
 925     gclog_or_tty->print_cr("");
 926     _short_lived_surv_rate_group->print();
 927     // do that for any other surv rate groups too
 928   }
 929 #endif // PRODUCT
 930 
 931   last_pause_included_initial_mark = during_initial_mark_pause();
 932   if (last_pause_included_initial_mark) {
 933     record_concurrent_mark_init_end(0.0);
 934   } else if (!_last_young_gc && need_to_start_conc_mark("end of GC")) {
 935     // Note: this might have already been set, if during the last
 936     // pause we decided to start a cycle but at the beginning of
 937     // this pause we decided to postpone it. That's OK.
 938     set_initiate_conc_mark_if_possible();
 939   }
 940 
 941   _mmu_tracker->add_pause(end_time_sec - pause_time_ms/1000.0,
 942                           end_time_sec, false);
 943 



 944   if (update_stats) {
 945     _trace_gen0_time_data.record_end_collection(pause_time_ms, phase_times());
 946     // this is where we update the allocation rate of the application
 947     double app_time_ms =
 948       (phase_times()->cur_collection_start_sec() * 1000.0 - _prev_collection_pause_end_ms);
 949     if (app_time_ms < MIN_TIMER_GRANULARITY) {
 950       // This usually happens due to the timer not having the required
 951       // granularity. Some Linuxes are the usual culprits.
 952       // We'll just set it to something (arbitrarily) small.
 953       app_time_ms = 1.0;
 954     }
 955     // We maintain the invariant that all objects allocated by mutator
 956     // threads will be allocated out of eden regions. So, we can use
 957     // the eden region number allocated since the previous GC to
 958     // calculate the application's allocate rate. The only exception
 959     // to that is humongous objects that are allocated separately. But
 960     // given that humongous object allocations do not really affect
 961     // either the pause's duration nor when the next pause will take
 962     // place we can safely ignore them here.
 963     uint regions_allocated = eden_cset_region_length();


1879 }
1880 
1881 uint G1CollectorPolicy::calc_max_old_cset_length() {
1882   // The max old CSet region bound is based on the threshold expressed
1883   // as a percentage of the heap size. I.e., it should bound the
1884   // number of old regions added to the CSet irrespective of how many
1885   // of them are available.
1886 
1887   G1CollectedHeap* g1h = G1CollectedHeap::heap();
1888   const size_t region_num = g1h->n_regions();
1889   const size_t perc = (size_t) G1OldCSetRegionThresholdPercent;
1890   size_t result = region_num * perc / 100;
1891   // emulate ceiling
1892   if (100 * result < region_num * perc) {
1893     result += 1;
1894   }
1895   return (uint) result;
1896 }
1897 
1898 
1899 void G1CollectorPolicy::finalize_cset(double target_pause_time_ms) {
1900   double young_start_time_sec = os::elapsedTime();
1901 
1902   YoungList* young_list = _g1->young_list();
1903   finalize_incremental_cset_building();
1904 
1905   guarantee(target_pause_time_ms > 0.0,
1906             err_msg("target_pause_time_ms = %1.6lf should be positive",
1907                     target_pause_time_ms));
1908   guarantee(_collection_set == NULL, "Precondition");
1909 
1910   double base_time_ms = predict_base_elapsed_time_ms(_pending_cards);
1911   double predicted_pause_time_ms = base_time_ms;
1912   double time_remaining_ms = MAX2(target_pause_time_ms - base_time_ms, 0.0);
1913 
1914   ergo_verbose4(ErgoCSetConstruction | ErgoHigh,
1915                 "start choosing CSet",
1916                 ergo_format_size("_pending_cards")
1917                 ergo_format_ms("predicted base time")
1918                 ergo_format_ms("remaining time")
1919                 ergo_format_ms("target pause time"),


2085     }
2086 
2087     cset_chooser->verify();
2088   }
2089 
2090   stop_incremental_cset_building();
2091 
2092   ergo_verbose5(ErgoCSetConstruction,
2093                 "finish choosing CSet",
2094                 ergo_format_region("eden")
2095                 ergo_format_region("survivors")
2096                 ergo_format_region("old")
2097                 ergo_format_ms("predicted pause time")
2098                 ergo_format_ms("target pause time"),
2099                 eden_region_length, survivor_region_length,
2100                 old_cset_region_length(),
2101                 predicted_pause_time_ms, target_pause_time_ms);
2102 
2103   double non_young_end_time_sec = os::elapsedTime();
2104   phase_times()->record_non_young_cset_choice_time_ms((non_young_end_time_sec - non_young_start_time_sec) * 1000.0);

2105 }
2106 
2107 void TraceGen0TimeData::record_start_collection(double time_to_stop_the_world_ms) {
2108   if(TraceGen0Time) {
2109     _all_stop_world_times_ms.add(time_to_stop_the_world_ms);
2110   }
2111 }
2112 
2113 void TraceGen0TimeData::record_yield_time(double yield_time_ms) {
2114   if(TraceGen0Time) {
2115     _all_yield_times_ms.add(yield_time_ms);
2116   }
2117 }
2118 
2119 void TraceGen0TimeData::record_end_collection(double pause_time_ms, G1GCPhaseTimes* phase_times) {
2120   if(TraceGen0Time) {
2121     _total.add(pause_time_ms);
2122     _other.add(pause_time_ms - phase_times->accounted_time_ms());
2123     _root_region_scan_wait.add(phase_times->root_region_scan_wait_time_ms());
2124     _parallel.add(phase_times->cur_collection_par_time_ms());




 892         "do not request concurrent cycle initiation",
 893         ergo_format_reason("still doing mixed collections")
 894         ergo_format_byte("occupancy")
 895         ergo_format_byte("allocation request")
 896         ergo_format_byte_perc("threshold")
 897         ergo_format_str("source"),
 898         cur_used_bytes,
 899         alloc_byte_size,
 900         marking_initiating_used_threshold,
 901         (double) InitiatingHeapOccupancyPercent,
 902         source);
 903     }
 904   }
 905 
 906   return false;
 907 }
 908 
 909 // Any timer value below this threshold is considered to be zero
 910 #define MIN_TIMER_GRANULARITY 0.0000001
 911 
 912 void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, EvacuationInfo& evacuation_info) {
 913   double end_time_sec = os::elapsedTime();
 914   assert(_cur_collection_pause_used_regions_at_start >= cset_region_length(),
 915          "otherwise, the subtraction below does not make sense");
 916   size_t rs_size =
 917             _cur_collection_pause_used_regions_at_start - cset_region_length();
 918   size_t cur_used_bytes = _g1->used();
 919   assert(cur_used_bytes == _g1->recalculate_used(), "It should!");
 920   bool last_pause_included_initial_mark = false;
 921   bool update_stats = !_g1->evacuation_failed();
 922 
 923 #ifndef PRODUCT
 924   if (G1YoungSurvRateVerbose) {
 925     gclog_or_tty->print_cr("");
 926     _short_lived_surv_rate_group->print();
 927     // do that for any other surv rate groups too
 928   }
 929 #endif // PRODUCT
 930 
 931   last_pause_included_initial_mark = during_initial_mark_pause();
 932   if (last_pause_included_initial_mark) {
 933     record_concurrent_mark_init_end(0.0);
 934   } else if (!_last_young_gc && need_to_start_conc_mark("end of GC")) {
 935     // Note: this might have already been set, if during the last
 936     // pause we decided to start a cycle but at the beginning of
 937     // this pause we decided to postpone it. That's OK.
 938     set_initiate_conc_mark_if_possible();
 939   }
 940 
 941   _mmu_tracker->add_pause(end_time_sec - pause_time_ms/1000.0,
 942                           end_time_sec, false);
 943 
 944   evacuation_info.set_collectionset_used_before(_collection_set_bytes_used_before);
 945   evacuation_info.set_bytes_copied(_bytes_copied_during_gc);
 946 
 947   if (update_stats) {
 948     _trace_gen0_time_data.record_end_collection(pause_time_ms, phase_times());
 949     // this is where we update the allocation rate of the application
 950     double app_time_ms =
 951       (phase_times()->cur_collection_start_sec() * 1000.0 - _prev_collection_pause_end_ms);
 952     if (app_time_ms < MIN_TIMER_GRANULARITY) {
 953       // This usually happens due to the timer not having the required
 954       // granularity. Some Linuxes are the usual culprits.
 955       // We'll just set it to something (arbitrarily) small.
 956       app_time_ms = 1.0;
 957     }
 958     // We maintain the invariant that all objects allocated by mutator
 959     // threads will be allocated out of eden regions. So, we can use
 960     // the eden region number allocated since the previous GC to
 961     // calculate the application's allocate rate. The only exception
 962     // to that is humongous objects that are allocated separately. But
 963     // given that humongous object allocations do not really affect
 964     // either the pause's duration nor when the next pause will take
 965     // place we can safely ignore them here.
 966     uint regions_allocated = eden_cset_region_length();


1882 }
1883 
1884 uint G1CollectorPolicy::calc_max_old_cset_length() {
1885   // The max old CSet region bound is based on the threshold expressed
1886   // as a percentage of the heap size. I.e., it should bound the
1887   // number of old regions added to the CSet irrespective of how many
1888   // of them are available.
1889 
1890   G1CollectedHeap* g1h = G1CollectedHeap::heap();
1891   const size_t region_num = g1h->n_regions();
1892   const size_t perc = (size_t) G1OldCSetRegionThresholdPercent;
1893   size_t result = region_num * perc / 100;
1894   // emulate ceiling
1895   if (100 * result < region_num * perc) {
1896     result += 1;
1897   }
1898   return (uint) result;
1899 }
1900 
1901 
1902 void G1CollectorPolicy::finalize_cset(double target_pause_time_ms, EvacuationInfo& evacuation_info) {
1903   double young_start_time_sec = os::elapsedTime();
1904 
1905   YoungList* young_list = _g1->young_list();
1906   finalize_incremental_cset_building();
1907 
1908   guarantee(target_pause_time_ms > 0.0,
1909             err_msg("target_pause_time_ms = %1.6lf should be positive",
1910                     target_pause_time_ms));
1911   guarantee(_collection_set == NULL, "Precondition");
1912 
1913   double base_time_ms = predict_base_elapsed_time_ms(_pending_cards);
1914   double predicted_pause_time_ms = base_time_ms;
1915   double time_remaining_ms = MAX2(target_pause_time_ms - base_time_ms, 0.0);
1916 
1917   ergo_verbose4(ErgoCSetConstruction | ErgoHigh,
1918                 "start choosing CSet",
1919                 ergo_format_size("_pending_cards")
1920                 ergo_format_ms("predicted base time")
1921                 ergo_format_ms("remaining time")
1922                 ergo_format_ms("target pause time"),


2088     }
2089 
2090     cset_chooser->verify();
2091   }
2092 
2093   stop_incremental_cset_building();
2094 
2095   ergo_verbose5(ErgoCSetConstruction,
2096                 "finish choosing CSet",
2097                 ergo_format_region("eden")
2098                 ergo_format_region("survivors")
2099                 ergo_format_region("old")
2100                 ergo_format_ms("predicted pause time")
2101                 ergo_format_ms("target pause time"),
2102                 eden_region_length, survivor_region_length,
2103                 old_cset_region_length(),
2104                 predicted_pause_time_ms, target_pause_time_ms);
2105 
2106   double non_young_end_time_sec = os::elapsedTime();
2107   phase_times()->record_non_young_cset_choice_time_ms((non_young_end_time_sec - non_young_start_time_sec) * 1000.0);
2108   evacuation_info.set_collectionset_regions(cset_region_length());
2109 }
2110 
2111 void TraceGen0TimeData::record_start_collection(double time_to_stop_the_world_ms) {
2112   if(TraceGen0Time) {
2113     _all_stop_world_times_ms.add(time_to_stop_the_world_ms);
2114   }
2115 }
2116 
2117 void TraceGen0TimeData::record_yield_time(double yield_time_ms) {
2118   if(TraceGen0Time) {
2119     _all_yield_times_ms.add(yield_time_ms);
2120   }
2121 }
2122 
2123 void TraceGen0TimeData::record_end_collection(double pause_time_ms, G1GCPhaseTimes* phase_times) {
2124   if(TraceGen0Time) {
2125     _total.add(pause_time_ms);
2126     _other.add(pause_time_ms - phase_times->accounted_time_ms());
2127     _root_region_scan_wait.add(phase_times->root_region_scan_wait_time_ms());
2128     _parallel.add(phase_times->cur_collection_par_time_ms());