
src/share/vm/gc/g1/g1CollectorPolicy.cpp

rev 9431 : dihop-changes
rev 9433 : imported patch erik-jmasa-review
rev 9434 : imported patch fix-evac-failure-needs-stats


  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc/g1/concurrentG1Refine.hpp"
  27 #include "gc/g1/concurrentMark.hpp"
  28 #include "gc/g1/concurrentMarkThread.inline.hpp"
  29 #include "gc/g1/g1CollectedHeap.inline.hpp"
  30 #include "gc/g1/g1CollectorPolicy.hpp"

  31 #include "gc/g1/g1ErgoVerbose.hpp"
  32 #include "gc/g1/g1GCPhaseTimes.hpp"
  33 #include "gc/g1/g1Log.hpp"
  34 #include "gc/g1/heapRegion.inline.hpp"
  35 #include "gc/g1/heapRegionRemSet.hpp"
  36 #include "gc/shared/gcPolicyCounters.hpp"
  37 #include "runtime/arguments.hpp"
  38 #include "runtime/java.hpp"
  39 #include "runtime/mutexLocker.hpp"
  40 #include "utilities/debug.hpp"

  41 
  42 // Different defaults for different number of GC threads
  43 // They were chosen by running GCOld and SPECjbb on debris with different
  44 //   numbers of GC threads and choosing them based on the results
  45 
  46 // all the same
  47 static double rs_length_diff_defaults[] = {
  48   0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
  49 };
  50 
  51 static double cost_per_card_ms_defaults[] = {
  52   0.01, 0.005, 0.005, 0.003, 0.003, 0.002, 0.002, 0.0015
  53 };
  54 
  55 // all the same
  56 static double young_cards_per_entry_ratio_defaults[] = {
  57   1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0
  58 };
  59 
  60 static double cost_per_entry_ms_defaults[] = {


 131   _collection_set(NULL),
 132   _collection_set_bytes_used_before(0),
 133 
 134   // Incremental CSet attributes
 135   _inc_cset_build_state(Inactive),
 136   _inc_cset_head(NULL),
 137   _inc_cset_tail(NULL),
 138   _inc_cset_bytes_used_before(0),
 139   _inc_cset_max_finger(NULL),
 140   _inc_cset_recorded_rs_lengths(0),
 141   _inc_cset_recorded_rs_lengths_diffs(0),
 142   _inc_cset_predicted_elapsed_time_ms(0.0),
 143   _inc_cset_predicted_elapsed_time_ms_diffs(0.0),
 144 
 145   // add here any more surv rate groups
 146   _recorded_survivor_regions(0),
 147   _recorded_survivor_head(NULL),
 148   _recorded_survivor_tail(NULL),
 149   _survivors_age_table(true),
 150 
 151   _gc_overhead_perc(0.0) {




 152 
 153   // SurvRateGroups below must be initialized after the predictor because they
 154   // indirectly use it through this object passed to their constructor.
 155   _short_lived_surv_rate_group =
 156     new SurvRateGroup(&_predictor, "Short Lived", G1YoungSurvRateNumRegionsSummary);
 157   _survivor_surv_rate_group =
 158     new SurvRateGroup(&_predictor, "Survivor", G1YoungSurvRateNumRegionsSummary);
 159 
 160   // Set up the region size and associated fields. Given that the
 161   // policy is created before the heap, we have to set this up here,
 162   // so it's done as soon as possible.
 163 
 164   // It would have been natural to pass initial_heap_byte_size() and
 165   // max_heap_byte_size() to setup_heap_region_size() but those have
 166   // not been set up at this point since they should be aligned with
 167   // the region size. So, there is a circular dependency here. We base
 168   // the region size on the heap size, but the heap size should be
 169   // aligned with the region size. To get around this we use the
 170   // unaligned values for the heap.
 171   HeapRegion::setup_heap_region_size(InitialHeapSize, MaxHeapSize);


 271   assert(GCTimeRatio > 0,
  272          "we should have set it to a default value in set_g1_gc_flags() "
 273          "if a user set it to 0");
 274   _gc_overhead_perc = 100.0 * (1.0 / (1.0 + GCTimeRatio));
 275 
 276   uintx reserve_perc = G1ReservePercent;
 277   // Put an artificial ceiling on this so that it's not set to a silly value.
 278   if (reserve_perc > 50) {
 279     reserve_perc = 50;
 280     warning("G1ReservePercent is set to a value that is too large, "
 281             "it's been updated to " UINTX_FORMAT, reserve_perc);
 282   }
 283   _reserve_factor = (double) reserve_perc / 100.0;
 284   // This will be set when the heap is expanded
 285   // for the first time during initialization.
 286   _reserve_regions = 0;
 287 
 288   _collectionSetChooser = new CollectionSetChooser();
 289 }
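For scale, the two computations just above work out as follows (illustrative fragment only, not part of the patch; the literal values stand in for the G1 defaults of GCTimeRatio = 9 and G1ReservePercent = 10):

    // _gc_overhead_perc: 100 * (1 / (1 + 9)) = 10, i.e. aim to spend at most ~10% of time in GC.
    double gc_overhead_perc = 100.0 * (1.0 / (1.0 + 9.0));   // 10.0
    // _reserve_factor: 10 / 100 = 0.10 of the heap kept back as reserve.
    double reserve_factor = 10 / 100.0;                       // 0.10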
 290 
 291 double G1CollectorPolicy::get_new_prediction(TruncatedSeq const* seq) const {
 292   return _predictor.get_new_prediction(seq);
 293 }
 294 
 295 void G1CollectorPolicy::initialize_alignments() {
 296   _space_alignment = HeapRegion::GrainBytes;
 297   size_t card_table_alignment = CardTableRS::ct_max_alignment_constraint();
 298   size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
 299   _heap_alignment = MAX3(card_table_alignment, _space_alignment, page_size);
 300 }
 301 
 302 void G1CollectorPolicy::initialize_flags() {
 303   if (G1HeapRegionSize != HeapRegion::GrainBytes) {
 304     FLAG_SET_ERGO(size_t, G1HeapRegionSize, HeapRegion::GrainBytes);
 305   }
 306 
 307   if (SurvivorRatio < 1) {
 308     vm_exit_during_initialization("Invalid survivor ratio specified");
 309   }
 310   CollectorPolicy::initialize_flags();
 311   _young_gen_sizer = new G1YoungGenSizer(); // Must be after call to initialize_flags
 312 }
 313 
 314 void G1CollectorPolicy::post_heap_initialize() {
 315   uintx max_regions = G1CollectedHeap::heap()->max_regions();
 316   size_t max_young_size = (size_t)_young_gen_sizer->max_young_length(max_regions) * HeapRegion::GrainBytes;
 317   if (max_young_size != MaxNewSize) {
 318     FLAG_SET_ERGO(size_t, MaxNewSize, max_young_size);
 319   }


 320 }
 321 
 322 G1CollectorState* G1CollectorPolicy::collector_state() const { return _g1->collector_state(); }
 323 
 324 G1YoungGenSizer::G1YoungGenSizer() : _sizer_kind(SizerDefaults), _adaptive_size(true),
 325         _min_desired_young_length(0), _max_desired_young_length(0) {
 326   if (FLAG_IS_CMDLINE(NewRatio)) {
 327     if (FLAG_IS_CMDLINE(NewSize) || FLAG_IS_CMDLINE(MaxNewSize)) {
 328       warning("-XX:NewSize and -XX:MaxNewSize override -XX:NewRatio");
 329     } else {
 330       _sizer_kind = SizerNewRatio;
 331       _adaptive_size = false;
 332       return;
 333     }
 334   }
 335 
 336   if (NewSize > MaxNewSize) {
 337     if (FLAG_IS_CMDLINE(MaxNewSize)) {
 338       warning("NewSize (" SIZE_FORMAT "k) is greater than the MaxNewSize (" SIZE_FORMAT "k). "
 339               "A new max generation size of " SIZE_FORMAT "k will be used.",


 505       double now_sec = os::elapsedTime();
 506       double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0;
 507       double alloc_rate_ms = predict_alloc_rate_ms();
 508       desired_min_length = (uint) ceil(alloc_rate_ms * when_ms);
 509     } else {
 510       // otherwise we don't have enough info to make the prediction
 511     }
 512   }
 513   desired_min_length += base_min_length;
 514   // make sure we don't go below any user-defined minimum bound
 515   return MAX2(_young_gen_sizer->min_desired_young_length(), desired_min_length);
 516 }
 517 
 518 uint G1CollectorPolicy::calculate_young_list_desired_max_length() const {
 519   // Here, we might want to also take into account any additional
 520   // constraints (i.e., user-defined minimum bound). Currently, we
 521   // effectively don't set this bound.
 522   return _young_gen_sizer->max_desired_young_length();
 523 }
 524 
 525 void G1CollectorPolicy::update_young_list_max_and_target_length() {
 526   update_young_list_max_and_target_length(get_new_prediction(_rs_lengths_seq));
 527 }
 528 
 529 void G1CollectorPolicy::update_young_list_max_and_target_length(size_t rs_lengths) {
 530   update_young_list_target_length(rs_lengths);
 531   update_max_gc_locker_expansion();

 532 }
 533 
 534 void G1CollectorPolicy::update_young_list_target_length(size_t rs_lengths) {
 535   _young_list_target_length = bounded_young_list_target_length(rs_lengths);


 536 }
 537 
 538 void G1CollectorPolicy::update_young_list_target_length() {
 539   update_young_list_target_length(get_new_prediction(_rs_lengths_seq));
 540 }
 541 
 542 uint G1CollectorPolicy::bounded_young_list_target_length(size_t rs_lengths) const {
 543   // Calculate the absolute and desired min bounds.
 544 
 545   // This is how many young regions we already have (currently: the survivors).
 546   uint base_min_length = recorded_survivor_regions();
 547   uint desired_min_length = calculate_young_list_desired_min_length(base_min_length);
 548   // This is the absolute minimum young length. Ensure that we
 549   // will at least have one eden region available for allocation.
 550   uint absolute_min_length = base_min_length + MAX2(_g1->young_list()->eden_length(), (uint)1);
 551   // If we shrank the young list target it should not shrink below the current size.
 552   desired_min_length = MAX2(desired_min_length, absolute_min_length);
 553   // Calculate the absolute and desired max bounds.
 554 
 555   // We will try our best not to "eat" into the reserve.
 556   uint absolute_max_length = 0;
 557   if (_free_regions_at_end_of_collection > _reserve_regions) {
 558     absolute_max_length = _free_regions_at_end_of_collection - _reserve_regions;
 559   }
 560   uint desired_max_length = calculate_young_list_desired_max_length();
 561   if (desired_max_length > absolute_max_length) {
 562     desired_max_length = absolute_max_length;
 563   }
 564 
 565   uint young_list_target_length = 0;
 566   if (adaptive_young_list_length()) {
 567     if (collector_state()->gcs_are_young()) {
 568       young_list_target_length =
 569                         calculate_young_list_target_length(rs_lengths,
 570                                                            base_min_length,
 571                                                            desired_min_length,
 572                                                            desired_max_length);
 573     } else {
 574       // Don't calculate anything and let the code below bound it to
 575       // the desired_min_length, i.e., do the next GC as soon as
 576       // possible to maximize how many old regions we can add to it.
 577     }
 578   } else {
 579     // The user asked for a fixed young gen so we'll fix the young gen
 580     // whether the next GC is young or mixed.
 581     young_list_target_length = _young_list_fixed_length;
 582   }
 583 
 584   // Make sure we don't go over the desired max length, nor under the
 585   // desired min length. In case they clash, desired_min_length wins
 586   // which is why that test is second.
 587   if (young_list_target_length > desired_max_length) {
 588     young_list_target_length = desired_max_length;
 589   }
 590   if (young_list_target_length < desired_min_length) {
 591     young_list_target_length = desired_min_length;
 592   }
 593 
 594   assert(young_list_target_length > recorded_survivor_regions(),
 595          "we should be able to allocate at least one eden region");
 596   assert(young_list_target_length >= absolute_min_length, "post-condition");
 597 
 598   return young_list_target_length;

 599 }
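The ordering of the two bounds checks above is deliberate; a minimal equivalent sketch (illustrative only, assuming HotSpot's MIN2/MAX2 helpers):

    // Apply the max bound first and the min bound second, so that if the two clash
    // (desired_min_length > desired_max_length) the min bound wins.
    uint bounded = MIN2(young_list_target_length, desired_max_length);
    bounded = MAX2(bounded, desired_min_length);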
 600 
 601 uint
 602 G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths,
 603                                                      uint base_min_length,
 604                                                      uint desired_min_length,
 605                                                      uint desired_max_length) const {
 606   assert(adaptive_young_list_length(), "pre-condition");
 607   assert(collector_state()->gcs_are_young(), "only call this for young GCs");
 608 
 609   // In case some edge-condition makes the desired max length too small...
 610   if (desired_max_length <= desired_min_length) {
 611     return desired_min_length;
 612   }
 613 
 614   // We'll adjust min_young_length and max_young_length not to include
 615   // the already allocated young regions (i.e., so they reflect the
 616   // min and max eden regions we'll allocate). The base_min_length
 617   // will be reflected in the predictions by the
 618   // survivor_regions_evac_time prediction.


 821   // "Nuke" the heuristics that control the young/mixed GC
 822   // transitions and make sure we start with young GCs after the Full GC.
 823   collector_state()->set_gcs_are_young(true);
 824   collector_state()->set_last_young_gc(false);
 825   collector_state()->set_initiate_conc_mark_if_possible(need_to_start_conc_mark("end of Full GC", 0));
 826   collector_state()->set_during_initial_mark_pause(false);
 827   collector_state()->set_in_marking_window(false);
 828   collector_state()->set_in_marking_window_im(false);
 829 
 830   _short_lived_surv_rate_group->start_adding_regions();
 831   // also call this on any additional surv rate groups
 832 
 833   record_survivor_regions(0, NULL, NULL);
 834 
 835   _free_regions_at_end_of_collection = _g1->num_free_regions();
 836   // Reset survivors SurvRateGroup.
 837   _survivor_surv_rate_group->reset();
 838   update_young_list_max_and_target_length();
 839   update_rs_lengths_prediction();
 840   _collectionSetChooser->clear();




 841 }
 842 
 843 void G1CollectorPolicy::record_stop_world_start() {
 844   _stop_world_start = os::elapsedTime();
 845 }
 846 
 847 void G1CollectorPolicy::record_collection_pause_start(double start_time_sec) {
 848   // We only need to do this here as the policy will only be applied
  849   // to the GC we're about to start. So, no point in calculating this
 850   // every time we calculate / recalculate the target young length.
 851   update_survivors_policy();
 852 
 853   assert(_g1->used() == _g1->recalculate_used(),
 854          "sanity, used: " SIZE_FORMAT " recalculate_used: " SIZE_FORMAT,
 855          _g1->used(), _g1->recalculate_used());
 856 
 857   double s_w_t_ms = (start_time_sec - _stop_world_start) * 1000.0;
 858   _trace_young_gen_time_data.record_start_collection(s_w_t_ms);
 859   _stop_world_start = 0.0;
 860 


 878 void G1CollectorPolicy::record_concurrent_mark_init_end(double
 879                                                    mark_init_elapsed_time_ms) {
 880   collector_state()->set_during_marking(true);
 881   assert(!collector_state()->initiate_conc_mark_if_possible(), "we should have cleared it by now");
 882   collector_state()->set_during_initial_mark_pause(false);
 883   _cur_mark_stop_world_time_ms = mark_init_elapsed_time_ms;
 884 }
 885 
 886 void G1CollectorPolicy::record_concurrent_mark_remark_start() {
 887   _mark_remark_start_sec = os::elapsedTime();
 888   collector_state()->set_during_marking(false);
 889 }
 890 
 891 void G1CollectorPolicy::record_concurrent_mark_remark_end() {
 892   double end_time_sec = os::elapsedTime();
 893   double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec)*1000.0;
 894   _concurrent_mark_remark_times_ms->add(elapsed_time_ms);
 895   _cur_mark_stop_world_time_ms += elapsed_time_ms;
 896   _prev_collection_pause_end_ms += elapsed_time_ms;
 897 
 898   _mmu_tracker->add_pause(_mark_remark_start_sec, end_time_sec);
 899 }
 900 
 901 void G1CollectorPolicy::record_concurrent_mark_cleanup_start() {
 902   _mark_cleanup_start_sec = os::elapsedTime();
 903 }
 904 
 905 void G1CollectorPolicy::record_concurrent_mark_cleanup_completed() {
 906   bool should_continue_with_reclaim = next_gc_should_be_mixed("request last young-only gc",
 907                                                               "skip last young-only gc");
 908   collector_state()->set_last_young_gc(should_continue_with_reclaim);




 909   collector_state()->set_in_marking_window(false);
 910 }
 911 
 912 void G1CollectorPolicy::record_concurrent_pause() {
 913   if (_stop_world_start > 0.0) {
 914     double yield_ms = (os::elapsedTime() - _stop_world_start) * 1000.0;
 915     _trace_young_gen_time_data.record_yield_time(yield_ms);
 916   }
 917 }
 918 
 919 double G1CollectorPolicy::average_time_ms(G1GCPhaseTimes::GCParPhases phase) const {
 920   return phase_times()->average_time_ms(phase);
 921 }
 922 
 923 double G1CollectorPolicy::young_other_time_ms() const {
 924   return phase_times()->young_cset_choice_time_ms() +
 925          phase_times()->young_free_cset_time_ms();
 926 }
 927 
 928 double G1CollectorPolicy::non_young_other_time_ms() const {


 935   return pause_time_ms -
 936          average_time_ms(G1GCPhaseTimes::UpdateRS) -
 937          average_time_ms(G1GCPhaseTimes::ScanRS) -
 938          average_time_ms(G1GCPhaseTimes::ObjCopy) -
 939          average_time_ms(G1GCPhaseTimes::Termination);
 940 }
 941 
 942 double G1CollectorPolicy::constant_other_time_ms(double pause_time_ms) const {
 943   return other_time_ms(pause_time_ms) - young_other_time_ms() - non_young_other_time_ms();
 944 }
 945 
 946 bool G1CollectorPolicy::about_to_start_mixed_phase() const {
 947   return _g1->concurrent_mark()->cmThread()->during_cycle() || collector_state()->last_young_gc();
 948 }
 949 
 950 bool G1CollectorPolicy::need_to_start_conc_mark(const char* source, size_t alloc_word_size) {
 951   if (about_to_start_mixed_phase()) {
 952     return false;
 953   }
 954 
 955   size_t marking_initiating_used_threshold =
 956     (_g1->capacity() / 100) * InitiatingHeapOccupancyPercent;
 957   size_t cur_used_bytes = _g1->non_young_capacity_bytes();
 958   size_t alloc_byte_size = alloc_word_size * HeapWordSize;

 959 
 960   if ((cur_used_bytes + alloc_byte_size) > marking_initiating_used_threshold) {
 961     if (collector_state()->gcs_are_young() && !collector_state()->last_young_gc()) {
 962       ergo_verbose5(ErgoConcCycles,
 963         "request concurrent cycle initiation",
 964         ergo_format_reason("occupancy higher than threshold")
 965         ergo_format_byte("occupancy")
 966         ergo_format_byte("allocation request")
 967         ergo_format_byte_perc("threshold")
 968         ergo_format_str("source"),
 969         cur_used_bytes,
 970         alloc_byte_size,
 971         marking_initiating_used_threshold,
 972         (double) InitiatingHeapOccupancyPercent,
 973         source);
 974       return true;
 975     } else {
 976       ergo_verbose5(ErgoConcCycles,
 977         "do not request concurrent cycle initiation",
 978         ergo_format_reason("still doing mixed collections")
 979         ergo_format_byte("occupancy")
 980         ergo_format_byte("allocation request")
 981         ergo_format_byte_perc("threshold")
 982         ergo_format_str("source"),
 983         cur_used_bytes,
 984         alloc_byte_size,
 985         marking_initiating_used_threshold,
 986         (double) InitiatingHeapOccupancyPercent,
 987         source);
 988     }
 989   }
 990 
 991   return false;
 992 }
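For scale, the fixed threshold used in the old code works out as below (illustrative fragment, not part of the patch; assumes a 1 GiB heap and the default InitiatingHeapOccupancyPercent of 45):

    // (1 GiB / 100) * 45 ~= 460 MiB: a concurrent cycle is requested once the non-young
    // occupancy plus the pending allocation crosses this mark.
    size_t capacity_bytes = (size_t)1024 * 1024 * 1024;
    size_t threshold = (capacity_bytes / 100) * 45;   // 483183810 bytes, ~460 MiB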
 993 
 994 // Anything below that is considered to be zero
 995 #define MIN_TIMER_GRANULARITY 0.0000001
 996 
 997 void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, size_t cards_scanned) {
 998   double end_time_sec = os::elapsedTime();
 999   assert(_cur_collection_pause_used_regions_at_start >= cset_region_length(),
1000          "otherwise, the subtraction below does not make sense");
1001   size_t rs_size =
1002             _cur_collection_pause_used_regions_at_start - cset_region_length();
1003   size_t cur_used_bytes = _g1->used();
1004   assert(cur_used_bytes == _g1->recalculate_used(), "It should!");
1005   bool last_pause_included_initial_mark = false;
1006   bool update_stats = !_g1->evacuation_failed();
1007 
1008 #ifndef PRODUCT
1009   if (G1YoungSurvRateVerbose) {
1010     gclog_or_tty->cr();
1011     _short_lived_surv_rate_group->print();
1012     // do that for any other surv rate groups too
1013   }
1014 #endif // PRODUCT
1015 


1016   last_pause_included_initial_mark = collector_state()->during_initial_mark_pause();
1017   if (last_pause_included_initial_mark) {
1018     record_concurrent_mark_init_end(0.0);
1019   } else {
1020     maybe_start_marking();
1021   }
1022 
1023   _mmu_tracker->add_pause(end_time_sec - pause_time_ms/1000.0, end_time_sec);
1024 
1025   if (update_stats) {
1026     _trace_young_gen_time_data.record_end_collection(pause_time_ms, phase_times());
1027     // this is where we update the allocation rate of the application
1028     double app_time_ms =
1029       (phase_times()->cur_collection_start_sec() * 1000.0 - _prev_collection_pause_end_ms);
1030     if (app_time_ms < MIN_TIMER_GRANULARITY) {
1031       // This usually happens due to the timer not having the required
1032       // granularity. Some Linuxes are the usual culprits.
1033       // We'll just set it to something (arbitrarily) small.
1034       app_time_ms = 1.0;
1035     }



1036     // We maintain the invariant that all objects allocated by mutator
1037     // threads will be allocated out of eden regions. So, we can use
1038     // the eden region number allocated since the previous GC to
1039     // calculate the application's allocate rate. The only exception
1040     // to that is humongous objects that are allocated separately. But
1041     // given that humongous object allocations do not really affect
 1042     // either the pause's duration or when the next pause will take
1043     // place we can safely ignore them here.
1044     uint regions_allocated = eden_cset_region_length();
1045     double alloc_rate_ms = (double) regions_allocated / app_time_ms;
1046     _alloc_rate_ms_seq->add(alloc_rate_ms);
1047 
1048     double interval_ms =
1049       (end_time_sec - _recent_prev_end_times_for_all_gcs_sec->oldest()) * 1000.0;
1050     update_recent_gc_times(end_time_sec, pause_time_ms);
1051     _recent_avg_pause_time_ratio = _recent_gc_times_ms->sum()/interval_ms;
1052     if (recent_avg_pause_time_ratio() < 0.0 ||
1053         (recent_avg_pause_time_ratio() - 1.0 > 0.0)) {
1054       // Clip ratio between 0.0 and 1.0, and continue. This will be fixed in
1055       // CR 6902692 by redoing the manner in which the ratio is incrementally computed.


1060         _recent_avg_pause_time_ratio = 1.0;
1061       }
1062     }
1063   }
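A quick illustration of the ratio being clipped above (numbers invented for the example):

    // If the recent GC pauses sum to 120 ms over a 2400 ms window since the oldest
    // recorded pause end, the ratio is 0.05; anything outside [0.0, 1.0] is clipped.
    double recent_avg_pause_time_ratio = 120.0 / 2400.0;   // 0.05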
1064 
1065   bool new_in_marking_window = collector_state()->in_marking_window();
1066   bool new_in_marking_window_im = false;
1067   if (last_pause_included_initial_mark) {
1068     new_in_marking_window = true;
1069     new_in_marking_window_im = true;
1070   }
1071 
1072   if (collector_state()->last_young_gc()) {
 1073     // This is supposed to be the "last young GC" before we start
1074     // doing mixed GCs. Here we decide whether to start mixed GCs or not.
1075     assert(!last_pause_included_initial_mark, "The last young GC is not allowed to be an initial mark GC");
1076 
1077     if (next_gc_should_be_mixed("start mixed GCs",
1078                                 "do not start mixed GCs")) {
1079       collector_state()->set_gcs_are_young(false);



1080     }
1081 
1082     collector_state()->set_last_young_gc(false);
1083   }
1084 
1085   if (!collector_state()->last_gc_was_young()) {
1086     // This is a mixed GC. Here we decide whether to continue doing
1087     // mixed GCs or not.
1088 
1089     if (!next_gc_should_be_mixed("continue mixed GCs",
1090                                  "do not continue mixed GCs")) {
1091       collector_state()->set_gcs_are_young(true);
1092 
1093       maybe_start_marking();
1094     }
1095   }
1096 
1097   _short_lived_surv_rate_group->start_adding_regions();
1098   // Do that for any other surv rate groups
1099 
1100   if (update_stats) {
1101     double cost_per_card_ms = 0.0;
1102     double cost_scan_hcc = average_time_ms(G1GCPhaseTimes::ScanHCC);
1103     if (_pending_cards > 0) {
1104       cost_per_card_ms = (average_time_ms(G1GCPhaseTimes::UpdateRS) - cost_scan_hcc) / (double) _pending_cards;
1105       _cost_per_card_ms_seq->add(cost_per_card_ms);
1106     }
1107     _cost_scan_hcc_seq->add(cost_scan_hcc);
1108 


1160 
1161     if (young_cset_region_length() > 0) {
1162       _young_other_cost_per_region_ms_seq->add(young_other_time_ms() /
1163                                                young_cset_region_length());
1164     }
1165 
1166     if (old_cset_region_length() > 0) {
1167       _non_young_other_cost_per_region_ms_seq->add(non_young_other_time_ms() /
1168                                                    old_cset_region_length());
1169     }
1170 
1171     _constant_other_time_ms_seq->add(constant_other_time_ms(pause_time_ms));
1172 
1173     _pending_cards_seq->add((double) _pending_cards);
1174     _rs_lengths_seq->add((double) _max_rs_lengths);
1175   }
1176 
1177   collector_state()->set_in_marking_window(new_in_marking_window);
1178   collector_state()->set_in_marking_window_im(new_in_marking_window_im);
1179   _free_regions_at_end_of_collection = _g1->num_free_regions();
1180   update_young_list_max_and_target_length();




1181   update_rs_lengths_prediction();
1182 
1183   // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
1184   double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
1185 
1186   double scan_hcc_time_ms = average_time_ms(G1GCPhaseTimes::ScanHCC);
1187 
1188   if (update_rs_time_goal_ms < scan_hcc_time_ms) {
1189     ergo_verbose2(ErgoTiming,
1190                   "adjust concurrent refinement thresholds",
1191                   ergo_format_reason("Scanning the HCC expected to take longer than Update RS time goal")
1192                   ergo_format_ms("Update RS time goal")
1193                   ergo_format_ms("Scan HCC time"),
1194                   update_rs_time_goal_ms,
1195                   scan_hcc_time_ms);
1196 
1197     update_rs_time_goal_ms = 0;
1198   } else {
1199     update_rs_time_goal_ms -= scan_hcc_time_ms;
1200   }
1201   adjust_concurrent_refinement(average_time_ms(G1GCPhaseTimes::UpdateRS) - scan_hcc_time_ms,
1202                                phase_times()->sum_thread_work_items(G1GCPhaseTimes::UpdateRS),
1203                                update_rs_time_goal_ms);
1204 
1205   _collectionSetChooser->verify();
1206 }
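For reference, the update RS time goal computed near the end of this method is a small slice of the pause goal; with the defaults MaxGCPauseMillis = 200 and G1RSetUpdatingPauseTimePercent = 10 it comes to (illustrative fragment only):

    // max_gc_time() is MaxGCPauseMillis expressed in seconds, so:
    double update_rs_time_goal_ms = 0.2 /* s */ * 1000.0 * 10 / 100.0;   // 20 ms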
1207 
1208 #define EXT_SIZE_FORMAT "%.1f%s"
1209 #define EXT_SIZE_PARAMS(bytes)                                  \
1210   byte_size_in_proper_unit((double)(bytes)),                    \
1211   proper_unit_for_byte_size((bytes))
1212 
1213 void G1CollectorPolicy::record_heap_size_info_at_start(bool full) {
1214   YoungList* young_list = _g1->young_list();
1215   _eden_used_bytes_before_gc = young_list->eden_used_bytes();
1216   _survivor_used_bytes_before_gc = young_list->survivor_used_bytes();
1217   _heap_capacity_bytes_before_gc = _g1->capacity();
1218   _heap_used_bytes_before_gc = _g1->used();
1219   _cur_collection_pause_used_regions_at_start = _g1->num_used_regions();
1220 
1221   _eden_capacity_bytes_before_gc =
1222          (_young_list_target_length * HeapRegion::GrainBytes) - _survivor_used_bytes_before_gc;
1223 
1224   if (full) {
1225     _metaspace_used_bytes_before_gc = MetaspaceAux::used_bytes();
1226   }
1227 }


1700 
1701 public:
1702   ParKnownGarbageTask(CollectionSetChooser* hrSorted, uint chunk_size, uint n_workers) :
1703       AbstractGangTask("ParKnownGarbageTask"),
1704       _hrSorted(hrSorted), _chunk_size(chunk_size),
1705       _g1(G1CollectedHeap::heap()), _hrclaimer(n_workers) {}
1706 
1707   void work(uint worker_id) {
1708     ParKnownGarbageHRClosure parKnownGarbageCl(_hrSorted, _chunk_size);
1709     _g1->heap_region_par_iterate(&parKnownGarbageCl, worker_id, &_hrclaimer);
1710   }
1711 };
1712 
1713 uint G1CollectorPolicy::calculate_parallel_work_chunk_size(uint n_workers, uint n_regions) const {
1714   assert(n_workers > 0, "Active gc workers should be greater than 0");
1715   const uint overpartition_factor = 4;
1716   const uint min_chunk_size = MAX2(n_regions / n_workers, 1U);
1717   return MAX2(n_regions / (n_workers * overpartition_factor), min_chunk_size);
1718 }
1719 
1720 void
1721 G1CollectorPolicy::record_concurrent_mark_cleanup_end() {
1722   _collectionSetChooser->clear();
1723 
1724   WorkGang* workers = _g1->workers();
1725   uint n_workers = workers->active_workers();
1726 
1727   uint n_regions = _g1->num_regions();
1728   uint chunk_size = calculate_parallel_work_chunk_size(n_workers, n_regions);
1729   _collectionSetChooser->prepare_for_par_region_addition(n_workers, n_regions, chunk_size);
1730   ParKnownGarbageTask par_known_garbage_task(_collectionSetChooser, chunk_size, n_workers);
1731   workers->run_task(&par_known_garbage_task);
1732 
1733   _collectionSetChooser->sort_regions();
1734 
1735   double end_sec = os::elapsedTime();
1736   double elapsed_time_ms = (end_sec - _mark_cleanup_start_sec) * 1000.0;
1737   _concurrent_mark_cleanup_times_ms->add(elapsed_time_ms);
1738   _cur_mark_stop_world_time_ms += elapsed_time_ms;
1739   _prev_collection_pause_end_ms += elapsed_time_ms;
1740   _mmu_tracker->add_pause(_mark_cleanup_start_sec, end_sec);

1741 }
1742 
1743 // Add the heap region at the head of the non-incremental collection set
1744 void G1CollectorPolicy::add_old_region_to_cset(HeapRegion* hr) {
1745   assert(_inc_cset_build_state == Active, "Precondition");
1746   assert(hr->is_old(), "the region should be old");
1747 
1748   assert(!hr->in_collection_set(), "should not already be in the CSet");
1749   _g1->register_old_region_with_cset(hr);
1750   hr->set_next_in_collection_set(_collection_set);
1751   _collection_set = hr;
1752   _collection_set_bytes_used_before += hr->used();
1753   size_t rs_length = hr->rem_set()->occupied();
1754   _recorded_rs_lengths += rs_length;
1755   _old_cset_region_length += 1;
1756 }
1757 
1758 // Initialize the per-collection-set information
1759 void G1CollectorPolicy::start_incremental_cset_building() {
1760   assert(_inc_cset_build_state == Inactive, "Precondition");


1934     csr = next;
1935   }
1936 }
1937 #endif // !PRODUCT
1938 
1939 double G1CollectorPolicy::reclaimable_bytes_perc(size_t reclaimable_bytes) const {
1940   // Returns the given amount of reclaimable bytes (that represents
1941   // the amount of reclaimable space still to be collected) as a
1942   // percentage of the current heap capacity.
1943   size_t capacity_bytes = _g1->capacity();
1944   return (double) reclaimable_bytes * 100.0 / (double) capacity_bytes;
1945 }
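Worked example for the helper above (numbers invented): 200 MiB of reclaimable space in a 2 GiB heap gives roughly 9.8%, which next_gc_should_be_mixed() below compares against G1HeapWastePercent (default 5):

    size_t reclaimable_bytes = (size_t)200 * 1024 * 1024;
    size_t capacity_bytes    = (size_t)2048 * 1024 * 1024;
    double reclaimable_perc  = (double) reclaimable_bytes * 100.0 / (double) capacity_bytes;  // ~9.77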
1946 
1947 void G1CollectorPolicy::maybe_start_marking() {
1948   if (need_to_start_conc_mark("end of GC")) {
1949     // Note: this might have already been set, if during the last
1950     // pause we decided to start a cycle but at the beginning of
1951     // this pause we decided to postpone it. That's OK.
1952     collector_state()->set_initiate_conc_mark_if_possible(true);
1953   }
1954 }
1955 
1956 bool G1CollectorPolicy::next_gc_should_be_mixed(const char* true_action_str,
1957                                                 const char* false_action_str) const {
1958   CollectionSetChooser* cset_chooser = _collectionSetChooser;
1959   if (cset_chooser->is_empty()) {
1960     ergo_verbose0(ErgoMixedGCs,
1961                   false_action_str,
1962                   ergo_format_reason("candidate old regions not available"));
1963     return false;
1964   }
1965 
1966   // Is the amount of uncollected reclaimable space above G1HeapWastePercent?
1967   size_t reclaimable_bytes = cset_chooser->remaining_reclaimable_bytes();
1968   double reclaimable_perc = reclaimable_bytes_perc(reclaimable_bytes);
1969   double threshold = (double) G1HeapWastePercent;
1970   if (reclaimable_perc <= threshold) {
1971     ergo_verbose4(ErgoMixedGCs,
1972               false_action_str,
1973               ergo_format_reason("reclaimable percentage not over threshold")




  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc/g1/concurrentG1Refine.hpp"
  27 #include "gc/g1/concurrentMark.hpp"
  28 #include "gc/g1/concurrentMarkThread.inline.hpp"
  29 #include "gc/g1/g1CollectedHeap.inline.hpp"
  30 #include "gc/g1/g1CollectorPolicy.hpp"
  31 #include "gc/g1/g1IHOPControl.hpp"
  32 #include "gc/g1/g1ErgoVerbose.hpp"
  33 #include "gc/g1/g1GCPhaseTimes.hpp"
  34 #include "gc/g1/g1Log.hpp"
  35 #include "gc/g1/heapRegion.inline.hpp"
  36 #include "gc/g1/heapRegionRemSet.hpp"
  37 #include "gc/shared/gcPolicyCounters.hpp"
  38 #include "runtime/arguments.hpp"
  39 #include "runtime/java.hpp"
  40 #include "runtime/mutexLocker.hpp"
  41 #include "utilities/debug.hpp"
  42 #include "utilities/pair.hpp"
  43 
  44 // Different defaults for different number of GC threads
  45 // They were chosen by running GCOld and SPECjbb on debris with different
  46 //   numbers of GC threads and choosing them based on the results
  47 
  48 // all the same
  49 static double rs_length_diff_defaults[] = {
  50   0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
  51 };
  52 
  53 static double cost_per_card_ms_defaults[] = {
  54   0.01, 0.005, 0.005, 0.003, 0.003, 0.002, 0.002, 0.0015
  55 };
  56 
  57 // all the same
  58 static double young_cards_per_entry_ratio_defaults[] = {
  59   1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0
  60 };
  61 
  62 static double cost_per_entry_ms_defaults[] = {


 133   _collection_set(NULL),
 134   _collection_set_bytes_used_before(0),
 135 
 136   // Incremental CSet attributes
 137   _inc_cset_build_state(Inactive),
 138   _inc_cset_head(NULL),
 139   _inc_cset_tail(NULL),
 140   _inc_cset_bytes_used_before(0),
 141   _inc_cset_max_finger(NULL),
 142   _inc_cset_recorded_rs_lengths(0),
 143   _inc_cset_recorded_rs_lengths_diffs(0),
 144   _inc_cset_predicted_elapsed_time_ms(0.0),
 145   _inc_cset_predicted_elapsed_time_ms_diffs(0.0),
 146 
 147   // add here any more surv rate groups
 148   _recorded_survivor_regions(0),
 149   _recorded_survivor_head(NULL),
 150   _recorded_survivor_tail(NULL),
 151   _survivors_age_table(true),
 152 
 153   _gc_overhead_perc(0.0),
 154 
 155   _last_old_allocated_bytes(0),
 156   _ihop_control(NULL),
 157   _initial_mark_to_mixed() {
 158 
 159   // SurvRateGroups below must be initialized after the predictor because they
 160   // indirectly use it through this object passed to their constructor.
 161   _short_lived_surv_rate_group =
 162     new SurvRateGroup(&_predictor, "Short Lived", G1YoungSurvRateNumRegionsSummary);
 163   _survivor_surv_rate_group =
 164     new SurvRateGroup(&_predictor, "Survivor", G1YoungSurvRateNumRegionsSummary);
 165 
 166   // Set up the region size and associated fields. Given that the
 167   // policy is created before the heap, we have to set this up here,
 168   // so it's done as soon as possible.
 169 
 170   // It would have been natural to pass initial_heap_byte_size() and
 171   // max_heap_byte_size() to setup_heap_region_size() but those have
 172   // not been set up at this point since they should be aligned with
 173   // the region size. So, there is a circular dependency here. We base
 174   // the region size on the heap size, but the heap size should be
 175   // aligned with the region size. To get around this we use the
 176   // unaligned values for the heap.
 177   HeapRegion::setup_heap_region_size(InitialHeapSize, MaxHeapSize);


 277   assert(GCTimeRatio > 0,
  278          "we should have set it to a default value in set_g1_gc_flags() "
 279          "if a user set it to 0");
 280   _gc_overhead_perc = 100.0 * (1.0 / (1.0 + GCTimeRatio));
 281 
 282   uintx reserve_perc = G1ReservePercent;
 283   // Put an artificial ceiling on this so that it's not set to a silly value.
 284   if (reserve_perc > 50) {
 285     reserve_perc = 50;
 286     warning("G1ReservePercent is set to a value that is too large, "
 287             "it's been updated to " UINTX_FORMAT, reserve_perc);
 288   }
 289   _reserve_factor = (double) reserve_perc / 100.0;
 290   // This will be set when the heap is expanded
 291   // for the first time during initialization.
 292   _reserve_regions = 0;
 293 
 294   _collectionSetChooser = new CollectionSetChooser();
 295 }
 296 
 297 G1CollectorPolicy::~G1CollectorPolicy() {
 298   delete _ihop_control;
 299 }
 300 
 301 double G1CollectorPolicy::get_new_prediction(TruncatedSeq const* seq) const {
 302   return _predictor.get_new_prediction(seq);
 303 }
 304 
 305 void G1CollectorPolicy::initialize_alignments() {
 306   _space_alignment = HeapRegion::GrainBytes;
 307   size_t card_table_alignment = CardTableRS::ct_max_alignment_constraint();
 308   size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
 309   _heap_alignment = MAX3(card_table_alignment, _space_alignment, page_size);
 310 }
 311 
 312 void G1CollectorPolicy::initialize_flags() {
 313   if (G1HeapRegionSize != HeapRegion::GrainBytes) {
 314     FLAG_SET_ERGO(size_t, G1HeapRegionSize, HeapRegion::GrainBytes);
 315   }
 316 
 317   if (SurvivorRatio < 1) {
 318     vm_exit_during_initialization("Invalid survivor ratio specified");
 319   }
 320   CollectorPolicy::initialize_flags();
 321   _young_gen_sizer = new G1YoungGenSizer(); // Must be after call to initialize_flags
 322 }
 323 
 324 void G1CollectorPolicy::post_heap_initialize() {
 325   uintx max_regions = G1CollectedHeap::heap()->max_regions();
 326   size_t max_young_size = (size_t)_young_gen_sizer->max_young_length(max_regions) * HeapRegion::GrainBytes;
 327   if (max_young_size != MaxNewSize) {
 328     FLAG_SET_ERGO(size_t, MaxNewSize, max_young_size);
 329   }
 330 
 331   _ihop_control = create_ihop_control();
 332 }
 333 
 334 G1CollectorState* G1CollectorPolicy::collector_state() const { return _g1->collector_state(); }
 335 
 336 G1YoungGenSizer::G1YoungGenSizer() : _sizer_kind(SizerDefaults), _adaptive_size(true),
 337         _min_desired_young_length(0), _max_desired_young_length(0) {
 338   if (FLAG_IS_CMDLINE(NewRatio)) {
 339     if (FLAG_IS_CMDLINE(NewSize) || FLAG_IS_CMDLINE(MaxNewSize)) {
 340       warning("-XX:NewSize and -XX:MaxNewSize override -XX:NewRatio");
 341     } else {
 342       _sizer_kind = SizerNewRatio;
 343       _adaptive_size = false;
 344       return;
 345     }
 346   }
 347 
 348   if (NewSize > MaxNewSize) {
 349     if (FLAG_IS_CMDLINE(MaxNewSize)) {
 350       warning("NewSize (" SIZE_FORMAT "k) is greater than the MaxNewSize (" SIZE_FORMAT "k). "
 351               "A new max generation size of " SIZE_FORMAT "k will be used.",


 517       double now_sec = os::elapsedTime();
 518       double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0;
 519       double alloc_rate_ms = predict_alloc_rate_ms();
 520       desired_min_length = (uint) ceil(alloc_rate_ms * when_ms);
 521     } else {
 522       // otherwise we don't have enough info to make the prediction
 523     }
 524   }
 525   desired_min_length += base_min_length;
 526   // make sure we don't go below any user-defined minimum bound
 527   return MAX2(_young_gen_sizer->min_desired_young_length(), desired_min_length);
 528 }
 529 
 530 uint G1CollectorPolicy::calculate_young_list_desired_max_length() const {
 531   // Here, we might want to also take into account any additional
 532   // constraints (i.e., user-defined minimum bound). Currently, we
 533   // effectively don't set this bound.
 534   return _young_gen_sizer->max_desired_young_length();
 535 }
 536 
 537 uint G1CollectorPolicy::update_young_list_max_and_target_length() {
 538   return update_young_list_max_and_target_length(get_new_prediction(_rs_lengths_seq));
 539 }
 540 
 541 uint G1CollectorPolicy::update_young_list_max_and_target_length(size_t rs_lengths) {
 542   uint unbounded_target_length = update_young_list_target_length(rs_lengths);
 543   update_max_gc_locker_expansion();
 544   return unbounded_target_length;
 545 }
 546 
 547 uint G1CollectorPolicy::update_young_list_target_length(size_t rs_lengths) {
 548   YoungTargetLengths young_lengths = young_list_target_lengths(rs_lengths);
 549   _young_list_target_length = young_lengths.first;
 550   return young_lengths.second;
 551 }
 552 
 553 G1CollectorPolicy::YoungTargetLengths G1CollectorPolicy::young_list_target_lengths(size_t rs_lengths) const {
 554   YoungTargetLengths result;

 555 
 556   // Calculate the absolute and desired min bounds first.

 557 
 558   // This is how many young regions we already have (currently: the survivors).
 559   uint base_min_length = recorded_survivor_regions();
 560   uint desired_min_length = calculate_young_list_desired_min_length(base_min_length);
 561   // This is the absolute minimum young length. Ensure that we
 562   // will at least have one eden region available for allocation.
 563   uint absolute_min_length = base_min_length + MAX2(_g1->young_list()->eden_length(), (uint)1);
 564   // If we shrank the young list target it should not shrink below the current size.
 565   desired_min_length = MAX2(desired_min_length, absolute_min_length);
 566   // Calculate the absolute and desired max bounds.
 567 
 568   uint desired_max_length = calculate_young_list_desired_max_length();



 569 
 570   uint young_list_target_length = 0;
 571   if (adaptive_young_list_length()) {
 572     if (collector_state()->gcs_are_young()) {
 573       young_list_target_length =
 574                         calculate_young_list_target_length(rs_lengths,
 575                                                            base_min_length,
 576                                                            desired_min_length,
 577                                                            desired_max_length);
 578     } else {
 579       // Don't calculate anything and let the code below bound it to
 580       // the desired_min_length, i.e., do the next GC as soon as
 581       // possible to maximize how many old regions we can add to it.
 582     }
 583   } else {
 584     // The user asked for a fixed young gen so we'll fix the young gen
 585     // whether the next GC is young or mixed.
 586     young_list_target_length = _young_list_fixed_length;
 587   }
 588 
 589   result.second = young_list_target_length;
  590
 591   // We will try our best not to "eat" into the reserve.
 592   uint absolute_max_length = 0;
 593   if (_free_regions_at_end_of_collection > _reserve_regions) {
 594     absolute_max_length = _free_regions_at_end_of_collection - _reserve_regions;
 595   }
 596   if (desired_max_length > absolute_max_length) {
 597     desired_max_length = absolute_max_length;
 598   }
 599 
 600   // Make sure we don't go over the desired max length, nor under the
 601   // desired min length. In case they clash, desired_min_length wins
 602   // which is why that test is second.
 603   if (young_list_target_length > desired_max_length) {
 604     young_list_target_length = desired_max_length;
 605   }
 606   if (young_list_target_length < desired_min_length) {
 607     young_list_target_length = desired_min_length;
 608   }
 609 
 610   assert(young_list_target_length > recorded_survivor_regions(),
 611          "we should be able to allocate at least one eden region");
 612   assert(young_list_target_length >= absolute_min_length, "post-condition");
 613 
 614   result.first = young_list_target_length;
 615   return result;
 616 }
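In the new code this function returns a pair rather than a single length; a usage sketch mirroring update_young_list_target_length() above (illustrative only):

    // first  = the target actually installed (bounded by the reserve and the min/max bounds),
    // second = the unbounded target, later handed to the IHOP prediction as the
    //          "unrestrained" young length.
    YoungTargetLengths lengths = young_list_target_lengths(rs_lengths);
    _young_list_target_length = lengths.first;
    uint unbounded_target_length = lengths.second;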
 617 
 618 uint
 619 G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths,
 620                                                      uint base_min_length,
 621                                                      uint desired_min_length,
 622                                                      uint desired_max_length) const {
 623   assert(adaptive_young_list_length(), "pre-condition");
 624   assert(collector_state()->gcs_are_young(), "only call this for young GCs");
 625 
 626   // In case some edge-condition makes the desired max length too small...
 627   if (desired_max_length <= desired_min_length) {
 628     return desired_min_length;
 629   }
 630 
 631   // We'll adjust min_young_length and max_young_length not to include
 632   // the already allocated young regions (i.e., so they reflect the
 633   // min and max eden regions we'll allocate). The base_min_length
 634   // will be reflected in the predictions by the
 635   // survivor_regions_evac_time prediction.


 838   // "Nuke" the heuristics that control the young/mixed GC
 839   // transitions and make sure we start with young GCs after the Full GC.
 840   collector_state()->set_gcs_are_young(true);
 841   collector_state()->set_last_young_gc(false);
 842   collector_state()->set_initiate_conc_mark_if_possible(need_to_start_conc_mark("end of Full GC", 0));
 843   collector_state()->set_during_initial_mark_pause(false);
 844   collector_state()->set_in_marking_window(false);
 845   collector_state()->set_in_marking_window_im(false);
 846 
 847   _short_lived_surv_rate_group->start_adding_regions();
 848   // also call this on any additional surv rate groups
 849 
 850   record_survivor_regions(0, NULL, NULL);
 851 
 852   _free_regions_at_end_of_collection = _g1->num_free_regions();
 853   // Reset survivors SurvRateGroup.
 854   _survivor_surv_rate_group->reset();
 855   update_young_list_max_and_target_length();
 856   update_rs_lengths_prediction();
 857   _collectionSetChooser->clear();
 858 
 859   _last_old_allocated_bytes = 0;
 860 
 861   record_pause(FullGC, _full_collection_start_sec, end_sec);
 862 }
 863 
 864 void G1CollectorPolicy::record_stop_world_start() {
 865   _stop_world_start = os::elapsedTime();
 866 }
 867 
 868 void G1CollectorPolicy::record_collection_pause_start(double start_time_sec) {
 869   // We only need to do this here as the policy will only be applied
  870   // to the GC we're about to start. So, no point in calculating this
 871   // every time we calculate / recalculate the target young length.
 872   update_survivors_policy();
 873 
 874   assert(_g1->used() == _g1->recalculate_used(),
 875          "sanity, used: " SIZE_FORMAT " recalculate_used: " SIZE_FORMAT,
 876          _g1->used(), _g1->recalculate_used());
 877 
 878   double s_w_t_ms = (start_time_sec - _stop_world_start) * 1000.0;
 879   _trace_young_gen_time_data.record_start_collection(s_w_t_ms);
 880   _stop_world_start = 0.0;
 881 


 899 void G1CollectorPolicy::record_concurrent_mark_init_end(double
 900                                                    mark_init_elapsed_time_ms) {
 901   collector_state()->set_during_marking(true);
 902   assert(!collector_state()->initiate_conc_mark_if_possible(), "we should have cleared it by now");
 903   collector_state()->set_during_initial_mark_pause(false);
 904   _cur_mark_stop_world_time_ms = mark_init_elapsed_time_ms;
 905 }
 906 
 907 void G1CollectorPolicy::record_concurrent_mark_remark_start() {
 908   _mark_remark_start_sec = os::elapsedTime();
 909   collector_state()->set_during_marking(false);
 910 }
 911 
 912 void G1CollectorPolicy::record_concurrent_mark_remark_end() {
 913   double end_time_sec = os::elapsedTime();
 914   double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec)*1000.0;
 915   _concurrent_mark_remark_times_ms->add(elapsed_time_ms);
 916   _cur_mark_stop_world_time_ms += elapsed_time_ms;
 917   _prev_collection_pause_end_ms += elapsed_time_ms;
 918 
 919   record_pause(Remark, _mark_remark_start_sec, end_time_sec);
 920 }
 921 
 922 void G1CollectorPolicy::record_concurrent_mark_cleanup_start() {
 923   _mark_cleanup_start_sec = os::elapsedTime();
 924 }
 925 
 926 void G1CollectorPolicy::record_concurrent_mark_cleanup_completed() {
 927   bool should_continue_with_reclaim = next_gc_should_be_mixed("request last young-only gc",
 928                                                               "skip last young-only gc");
 929   collector_state()->set_last_young_gc(should_continue_with_reclaim);
 930   // We skip the marking phase.
 931   if (!should_continue_with_reclaim) {
 932     abort_time_to_mixed_tracking();
 933   }
 934   collector_state()->set_in_marking_window(false);
 935 }
 936 
 937 void G1CollectorPolicy::record_concurrent_pause() {
 938   if (_stop_world_start > 0.0) {
 939     double yield_ms = (os::elapsedTime() - _stop_world_start) * 1000.0;
 940     _trace_young_gen_time_data.record_yield_time(yield_ms);
 941   }
 942 }
 943 
 944 double G1CollectorPolicy::average_time_ms(G1GCPhaseTimes::GCParPhases phase) const {
 945   return phase_times()->average_time_ms(phase);
 946 }
 947 
 948 double G1CollectorPolicy::young_other_time_ms() const {
 949   return phase_times()->young_cset_choice_time_ms() +
 950          phase_times()->young_free_cset_time_ms();
 951 }
 952 
 953 double G1CollectorPolicy::non_young_other_time_ms() const {


 960   return pause_time_ms -
 961          average_time_ms(G1GCPhaseTimes::UpdateRS) -
 962          average_time_ms(G1GCPhaseTimes::ScanRS) -
 963          average_time_ms(G1GCPhaseTimes::ObjCopy) -
 964          average_time_ms(G1GCPhaseTimes::Termination);
 965 }
 966 
 967 double G1CollectorPolicy::constant_other_time_ms(double pause_time_ms) const {
 968   return other_time_ms(pause_time_ms) - young_other_time_ms() - non_young_other_time_ms();
 969 }
 970 
 971 bool G1CollectorPolicy::about_to_start_mixed_phase() const {
 972   return _g1->concurrent_mark()->cmThread()->during_cycle() || collector_state()->last_young_gc();
 973 }
 974 
 975 bool G1CollectorPolicy::need_to_start_conc_mark(const char* source, size_t alloc_word_size) {
 976   if (about_to_start_mixed_phase()) {
 977     return false;
 978   }
 979 
 980   size_t marking_initiating_used_threshold = _ihop_control->get_conc_mark_start_threshold();
 981 
 982   size_t cur_used_bytes = _g1->non_young_capacity_bytes();
 983   size_t alloc_byte_size = alloc_word_size * HeapWordSize;
 984   size_t marking_request_bytes = cur_used_bytes + alloc_byte_size;
 985 
 986   if (marking_request_bytes > marking_initiating_used_threshold) {
 987     if (collector_state()->gcs_are_young() && !collector_state()->last_young_gc()) {
 988       ergo_verbose5(ErgoConcCycles,
 989         "request concurrent cycle initiation",
 990         ergo_format_reason("occupancy higher than threshold")
 991         ergo_format_byte("occupancy")
 992         ergo_format_byte("allocation request")
 993         ergo_format_byte_perc("threshold")
 994         ergo_format_str("source"),
 995         cur_used_bytes,
 996         alloc_byte_size,
 997         marking_initiating_used_threshold,
 998         (double) marking_initiating_used_threshold / _g1->capacity() * 100,
 999         source);
1000       return true;
1001     } else {
1002       ergo_verbose5(ErgoConcCycles,
1003         "do not request concurrent cycle initiation",
1004         ergo_format_reason("still doing mixed collections")
1005         ergo_format_byte("occupancy")
1006         ergo_format_byte("allocation request")
1007         ergo_format_byte_perc("threshold")
1008         ergo_format_str("source"),
1009         cur_used_bytes,
1010         alloc_byte_size,
1011         marking_initiating_used_threshold,
1012         (double) InitiatingHeapOccupancyPercent,
1013         source);
1014     }
1015   }
1016 
1017   return false;
1018 }
1019 
1020 // Anything below that is considered to be zero
1021 #define MIN_TIMER_GRANULARITY 0.0000001
1022 
1023 void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, size_t cards_scanned) {
1024   double end_time_sec = os::elapsedTime();
1025   assert(_cur_collection_pause_used_regions_at_start >= cset_region_length(),
1026          "otherwise, the subtraction below does not make sense");


1027   size_t cur_used_bytes = _g1->used();
1028   assert(cur_used_bytes == _g1->recalculate_used(), "It should!");
1029   bool last_pause_included_initial_mark = false;
1030   bool update_stats = !_g1->evacuation_failed();
1031 
1032 #ifndef PRODUCT
1033   if (G1YoungSurvRateVerbose) {
1034     gclog_or_tty->cr();
1035     _short_lived_surv_rate_group->print();
1036     // do that for any other surv rate groups too
1037   }
1038 #endif // PRODUCT
1039 
1040   record_pause(young_gc_pause_kind(), end_time_sec - pause_time_ms / 1000.0, end_time_sec);
1041 
1042   last_pause_included_initial_mark = collector_state()->during_initial_mark_pause();
1043   if (last_pause_included_initial_mark) {
1044     record_concurrent_mark_init_end(0.0);
1045   } else {
1046     maybe_start_marking();
1047   }
1048 
1049   double app_time_ms = (phase_times()->cur_collection_start_sec() * 1000.0 - _prev_collection_pause_end_ms);
1050   if (app_time_ms < MIN_TIMER_GRANULARITY) {
1051     // This usually happens due to the timer not having the required
1052     // granularity. Some Linuxes are the usual culprits.
1053     // We'll just set it to something (arbitrarily) small.
1054     app_time_ms = 1.0;
1055   }
1056 
1057   if (update_stats) {
1058     _trace_young_gen_time_data.record_end_collection(pause_time_ms, phase_times());
1059     // We maintain the invariant that all objects allocated by mutator
1060     // threads will be allocated out of eden regions. So, we can use
1061     // the eden region number allocated since the previous GC to
1062     // calculate the application's allocate rate. The only exception
1063     // to that is humongous objects that are allocated separately. But
1064     // given that humongous object allocations do not really affect
 1065     // either the pause's duration or when the next pause will take
1066     // place we can safely ignore them here.
1067     uint regions_allocated = eden_cset_region_length();
1068     double alloc_rate_ms = (double) regions_allocated / app_time_ms;
1069     _alloc_rate_ms_seq->add(alloc_rate_ms);
1070 
1071     double interval_ms =
1072       (end_time_sec - _recent_prev_end_times_for_all_gcs_sec->oldest()) * 1000.0;
1073     update_recent_gc_times(end_time_sec, pause_time_ms);
1074     _recent_avg_pause_time_ratio = _recent_gc_times_ms->sum()/interval_ms;
1075     if (recent_avg_pause_time_ratio() < 0.0 ||
1076         (recent_avg_pause_time_ratio() - 1.0 > 0.0)) {
1077       // Clip ratio between 0.0 and 1.0, and continue. This will be fixed in
1078       // CR 6902692 by redoing the manner in which the ratio is incrementally computed.


1083         _recent_avg_pause_time_ratio = 1.0;
1084       }
1085     }
1086   }
1087 
1088   bool new_in_marking_window = collector_state()->in_marking_window();
1089   bool new_in_marking_window_im = false;
1090   if (last_pause_included_initial_mark) {
1091     new_in_marking_window = true;
1092     new_in_marking_window_im = true;
1093   }
1094 
1095   if (collector_state()->last_young_gc()) {
 1096     // This is supposed to be the "last young GC" before we start
1097     // doing mixed GCs. Here we decide whether to start mixed GCs or not.
1098     assert(!last_pause_included_initial_mark, "The last young GC is not allowed to be an initial mark GC");
1099 
1100     if (next_gc_should_be_mixed("start mixed GCs",
1101                                 "do not start mixed GCs")) {
1102       collector_state()->set_gcs_are_young(false);
1103     } else {
1104       // We aborted the mixed GC phase early.
1105       abort_time_to_mixed_tracking();
1106     }
1107 
1108     collector_state()->set_last_young_gc(false);
1109   }
1110 
1111   if (!collector_state()->last_gc_was_young()) {
1112     // This is a mixed GC. Here we decide whether to continue doing
1113     // mixed GCs or not.

1114     if (!next_gc_should_be_mixed("continue mixed GCs",
1115                                  "do not continue mixed GCs")) {
1116       collector_state()->set_gcs_are_young(true);
1117 
1118       maybe_start_marking();
1119     }
1120   }
1121 
1122   _short_lived_surv_rate_group->start_adding_regions();
1123   // Do that for any other surv rate groups
1124 
1125   if (update_stats) {
1126     double cost_per_card_ms = 0.0;
1127     double cost_scan_hcc = average_time_ms(G1GCPhaseTimes::ScanHCC);
1128     if (_pending_cards > 0) {
1129       cost_per_card_ms = (average_time_ms(G1GCPhaseTimes::UpdateRS) - cost_scan_hcc) / (double) _pending_cards;
1130       _cost_per_card_ms_seq->add(cost_per_card_ms);
1131     }
1132     _cost_scan_hcc_seq->add(cost_scan_hcc);
1133 


1185 
1186     if (young_cset_region_length() > 0) {
1187       _young_other_cost_per_region_ms_seq->add(young_other_time_ms() /
1188                                                young_cset_region_length());
1189     }
1190 
1191     if (old_cset_region_length() > 0) {
1192       _non_young_other_cost_per_region_ms_seq->add(non_young_other_time_ms() /
1193                                                    old_cset_region_length());
1194     }
1195 
1196     _constant_other_time_ms_seq->add(constant_other_time_ms(pause_time_ms));
1197 
1198     _pending_cards_seq->add((double) _pending_cards);
1199     _rs_lengths_seq->add((double) _max_rs_lengths);
1200   }
1201 
1202   collector_state()->set_in_marking_window(new_in_marking_window);
1203   collector_state()->set_in_marking_window_im(new_in_marking_window_im);
1204   _free_regions_at_end_of_collection = _g1->num_free_regions();
1205   // IHOP control wants to know the expected young gen length if it were not
1206   // restrained by the heap reserve. Using the actual length would make the
1207   // prediction too small and needlessly limit the young gen every time we
1208   // reach the predicted target occupancy.
1209   size_t last_unrestrained_young_length = update_young_list_max_and_target_length();
1210   update_rs_lengths_prediction();
1211 
1212   update_ihop_prediction(app_time_ms / 1000.0,
1213                          _last_old_allocated_bytes,
1214                          last_unrestrained_young_length * HeapRegion::GrainBytes);
1215   _last_old_allocated_bytes = 0;
1216 
1217   // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
1218   double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
1219 
1220   double scan_hcc_time_ms = average_time_ms(G1GCPhaseTimes::ScanHCC);
1221 
1222   if (update_rs_time_goal_ms < scan_hcc_time_ms) {
1223     ergo_verbose2(ErgoTiming,
1224                   "adjust concurrent refinement thresholds",
1225                   ergo_format_reason("Scanning the HCC expected to take longer than Update RS time goal")
1226                   ergo_format_ms("Update RS time goal")
1227                   ergo_format_ms("Scan HCC time"),
1228                   update_rs_time_goal_ms,
1229                   scan_hcc_time_ms);
1230 
1231     update_rs_time_goal_ms = 0;
1232   } else {
1233     update_rs_time_goal_ms -= scan_hcc_time_ms;
1234   }
1235   adjust_concurrent_refinement(average_time_ms(G1GCPhaseTimes::UpdateRS) - scan_hcc_time_ms,
1236                                phase_times()->sum_thread_work_items(G1GCPhaseTimes::UpdateRS),
1237                                update_rs_time_goal_ms);
1238 
1239   _collectionSetChooser->verify();
1240 }
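
A minimal standalone sketch of the pause-time-ratio bookkeeping done above, assuming a fixed-size window of recent pause durations and the end time of the oldest one (names are illustrative, not taken from this file):

#include <deque>

// Fraction of recent wall-clock time spent in GC pauses, clipped to [0.0, 1.0]
// in the same spirit as the CR 6902692 workaround above.
struct RecentPauseTimeRatio {
  std::deque<double> pause_ms;   // durations of the most recent pauses
  double oldest_end_sec;         // end time of the oldest pause in the window

  double ratio(double now_sec) const {
    double interval_ms = (now_sec - oldest_end_sec) * 1000.0;
    if (interval_ms <= 0.0) {
      return 1.0;                // degenerate window; treat as all-GC time
    }
    double sum_ms = 0.0;
    for (double p : pause_ms) {
      sum_ms += p;
    }
    double r = sum_ms / interval_ms;
    return r < 0.0 ? 0.0 : (r > 1.0 ? 1.0 : r);
  }
};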
1241 
1242 G1IHOPControl* G1CollectorPolicy::create_ihop_control() const {
1243   return new G1StaticIHOPControl(InitiatingHeapOccupancyPercent,
1244                                  G1CollectedHeap::heap()->max_capacity());
1245 }
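
Conceptually, a static IHOP control reduces to a fixed threshold. The sketch below is an assumption about what G1StaticIHOPControl (defined elsewhere in this patch) amounts to, not a copy of it:

// Marking starts once heap occupancy crosses a fixed percentage of the
// maximum capacity; allocation and marking-time samples do not move it.
static size_t static_ihop_threshold(unsigned initiating_occupancy_percent,
                                    size_t max_capacity_bytes) {
  return (size_t)((double)max_capacity_bytes * initiating_occupancy_percent / 100.0);
}
// Example: a 1 GiB heap with InitiatingHeapOccupancyPercent = 45 yields a
// threshold of roughly 460 MiB.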
1246 
1247 void G1CollectorPolicy::update_ihop_prediction(double mutator_time_s,
1248                                                size_t mutator_alloc_bytes,
1249                                                size_t young_gen_size) {
1250   // Always try to update IHOP prediction. Even evacuation failures give information
1251   // about e.g. whether to start IHOP earlier next time.
1252 
1253   // Avoid using really small application times that might create samples with
1254   // very high or very low values. They may be caused by e.g. back-to-back gcs.
1255   double const min_valid_time = 1e-6;
1256 
1257   bool report = false;
1258 
1259   double marking_to_mixed_time = -1.0;
1260   if (!collector_state()->last_gc_was_young() && _initial_mark_to_mixed.has_result()) {
1261     marking_to_mixed_time = _initial_mark_to_mixed.last_marking_time();
1262     assert(marking_to_mixed_time > 0.0,
1263            "Initial mark to mixed time must be larger than zero but is %.3f",
1264            marking_to_mixed_time);
1265     if (marking_to_mixed_time > min_valid_time) {
1266       _ihop_control->update_marking_length(marking_to_mixed_time);
1267       report = true;
1268     }
1269   }
1270 
1271   // As an approximation for the young gc promotion rates during marking we use
1272   // the rates from all young gcs. In many applications there are only a few, if
1273   // any, young gcs during marking, which would make a prediction based on those
1274   // alone useless; using all of them increases the accuracy of the prediction.
1275   if (collector_state()->last_gc_was_young() && mutator_time_s > min_valid_time) {
1276     _ihop_control->update_allocation_info(mutator_time_s, mutator_alloc_bytes, young_gen_size);
1277     report = true;
1278   }
1279 
1280   if (report) {
1281     report_ihop_statistics();
1282   }
1283 }
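
The samples recorded above (mutator time, mutator-allocated bytes, marking length, unrestrained young gen size) are the inputs an adaptive control would need to move the threshold. The sketch below only illustrates that idea under assumed names; it is not this patch's implementation:

// Start marking early enough that the bytes expected to be allocated while
// marking runs, plus the unrestrained young gen, still fit under the target
// occupancy.
static size_t sketch_adaptive_marking_threshold(size_t target_occupancy_bytes,
                                                double alloc_rate_bytes_per_sec,
                                                double predicted_marking_time_sec,
                                                size_t young_gen_size_bytes) {
  double predicted_bytes = alloc_rate_bytes_per_sec * predicted_marking_time_sec
                           + (double)young_gen_size_bytes;
  if (predicted_bytes >= (double)target_occupancy_bytes) {
    return 0; // cannot stay under the target; start marking immediately
  }
  return target_occupancy_bytes - (size_t)predicted_bytes;
}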
1284 
1285 void G1CollectorPolicy::report_ihop_statistics() {
1286   _ihop_control->print();
1287 }
1288 
1289 #define EXT_SIZE_FORMAT "%.1f%s"
1290 #define EXT_SIZE_PARAMS(bytes)                                  \
1291   byte_size_in_proper_unit((double)(bytes)),                    \
1292   proper_unit_for_byte_size((bytes))
1293 
1294 void G1CollectorPolicy::record_heap_size_info_at_start(bool full) {
1295   YoungList* young_list = _g1->young_list();
1296   _eden_used_bytes_before_gc = young_list->eden_used_bytes();
1297   _survivor_used_bytes_before_gc = young_list->survivor_used_bytes();
1298   _heap_capacity_bytes_before_gc = _g1->capacity();
1299   _heap_used_bytes_before_gc = _g1->used();
1300   _cur_collection_pause_used_regions_at_start = _g1->num_used_regions();
1301 
1302   _eden_capacity_bytes_before_gc =
1303          (_young_list_target_length * HeapRegion::GrainBytes) - _survivor_used_bytes_before_gc;
1304 
1305   if (full) {
1306     _metaspace_used_bytes_before_gc = MetaspaceAux::used_bytes();
1307   }
1308 }
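
A quick worked example of the eden capacity recorded above, with made-up numbers:

// Young list target length of 100 regions, 1 MiB regions, 8 MiB used by survivors:
//   _eden_capacity_bytes_before_gc = 100 * 1 MiB - 8 MiB = 92 MiB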


1781 
1782 public:
1783   ParKnownGarbageTask(CollectionSetChooser* hrSorted, uint chunk_size, uint n_workers) :
1784       AbstractGangTask("ParKnownGarbageTask"),
1785       _hrSorted(hrSorted), _chunk_size(chunk_size),
1786       _g1(G1CollectedHeap::heap()), _hrclaimer(n_workers) {}
1787 
1788   void work(uint worker_id) {
1789     ParKnownGarbageHRClosure parKnownGarbageCl(_hrSorted, _chunk_size);
1790     _g1->heap_region_par_iterate(&parKnownGarbageCl, worker_id, &_hrclaimer);
1791   }
1792 };
1793 
1794 uint G1CollectorPolicy::calculate_parallel_work_chunk_size(uint n_workers, uint n_regions) const {
1795   assert(n_workers > 0, "Active gc workers should be greater than 0");
1796   const uint overpartition_factor = 4;
1797   const uint min_chunk_size = MAX2(n_regions / n_workers, 1U);
1798   return MAX2(n_regions / (n_workers * overpartition_factor), min_chunk_size);
1799 }
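
A worked example of the chunk-size calculation above, with illustrative numbers:

// n_regions = 2048, n_workers = 8:
//   min_chunk_size      = MAX2(2048 / 8, 1)  = 256
//   overpartitioned     = 2048 / (8 * 4)     = 64
//   returned chunk size = MAX2(64, 256)      = 256 regions per claimed chunk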
1800 
1801 void G1CollectorPolicy::record_concurrent_mark_cleanup_end() {

1802   _collectionSetChooser->clear();
1803 
1804   WorkGang* workers = _g1->workers();
1805   uint n_workers = workers->active_workers();
1806 
1807   uint n_regions = _g1->num_regions();
1808   uint chunk_size = calculate_parallel_work_chunk_size(n_workers, n_regions);
1809   _collectionSetChooser->prepare_for_par_region_addition(n_workers, n_regions, chunk_size);
1810   ParKnownGarbageTask par_known_garbage_task(_collectionSetChooser, chunk_size, n_workers);
1811   workers->run_task(&par_known_garbage_task);
1812 
1813   _collectionSetChooser->sort_regions();
1814 
1815   double end_sec = os::elapsedTime();
1816   double elapsed_time_ms = (end_sec - _mark_cleanup_start_sec) * 1000.0;
1817   _concurrent_mark_cleanup_times_ms->add(elapsed_time_ms);
1818   _cur_mark_stop_world_time_ms += elapsed_time_ms;
1819   _prev_collection_pause_end_ms += elapsed_time_ms;
1820 
1821   record_pause(Cleanup, _mark_cleanup_start_sec, end_sec);
1822 }
1823 
1824 // Add the heap region at the head of the non-incremental collection set
1825 void G1CollectorPolicy::add_old_region_to_cset(HeapRegion* hr) {
1826   assert(_inc_cset_build_state == Active, "Precondition");
1827   assert(hr->is_old(), "the region should be old");
1828 
1829   assert(!hr->in_collection_set(), "should not already be in the CSet");
1830   _g1->register_old_region_with_cset(hr);
1831   hr->set_next_in_collection_set(_collection_set);
1832   _collection_set = hr;
1833   _collection_set_bytes_used_before += hr->used();
1834   size_t rs_length = hr->rem_set()->occupied();
1835   _recorded_rs_lengths += rs_length;
1836   _old_cset_region_length += 1;
1837 }
1838 
1839 // Initialize the per-collection-set information
1840 void G1CollectorPolicy::start_incremental_cset_building() {
1841   assert(_inc_cset_build_state == Inactive, "Precondition");


2015     csr = next;
2016   }
2017 }
2018 #endif // !PRODUCT
2019 
2020 double G1CollectorPolicy::reclaimable_bytes_perc(size_t reclaimable_bytes) const {
2021   // Returns the given amount of reclaimable bytes (that represents
2022   // the amount of reclaimable space still to be collected) as a
2023   // percentage of the current heap capacity.
2024   size_t capacity_bytes = _g1->capacity();
2025   return (double) reclaimable_bytes * 100.0 / (double) capacity_bytes;
2026 }
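
A worked example with illustrative numbers:

// reclaimable_bytes = 256 MiB, capacity = 4096 MiB:
//   reclaimable_perc = 256 * 100.0 / 4096 = 6.25
// With a G1HeapWastePercent of 5 (the default) this is above the threshold, so
// next_gc_should_be_mixed() below would allow mixed GCs to continue.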
2027 
2028 void G1CollectorPolicy::maybe_start_marking() {
2029   if (need_to_start_conc_mark("end of GC")) {
2030     // Note: this might have already been set, if during the last
2031     // pause we decided to start a cycle but at the beginning of
2032     // this pause we decided to postpone it. That's OK.
2033     collector_state()->set_initiate_conc_mark_if_possible(true);
2034   }
2035 }
2036 
2037 G1CollectorPolicy::PauseKind G1CollectorPolicy::young_gc_pause_kind() const {
2038   assert(!collector_state()->full_collection(), "must be");
2039   if (collector_state()->during_initial_mark_pause()) {
2040     assert(collector_state()->last_gc_was_young(), "must be");
2041     assert(!collector_state()->last_young_gc(), "must be");
2042     return InitialMarkGC;
2043   } else if (collector_state()->last_young_gc()) {
2044     assert(!collector_state()->during_initial_mark_pause(), "must be");
2045     assert(collector_state()->last_gc_was_young(), "must be");
2046     return LastYoungGC;
2047   } else if (!collector_state()->last_gc_was_young()) {
2048     assert(!collector_state()->during_initial_mark_pause(), "must be");
2049     assert(!collector_state()->last_young_gc(), "must be");
2050     return MixedGC;
2051   } else {
2052     assert(collector_state()->last_gc_was_young(), "must be");
2053     assert(!collector_state()->during_initial_mark_pause(), "must be");
2054     assert(!collector_state()->last_young_gc(), "must be");
2055     return YoungOnlyGC;
2056   }
2057 }
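
Summarizing the classification above (derived from its branches and asserts, no new logic):

//   during_initial_mark_pause()  -> InitialMarkGC
//   last_young_gc()              -> LastYoungGC   (final young-only GC before mixed GCs)
//   !last_gc_was_young()         -> MixedGC
//   otherwise                    -> YoungOnlyGC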
2058 
2059 void G1CollectorPolicy::record_pause(PauseKind kind, double start, double end) {
2060   // Manage the MMU tracker. For some reason it ignores Full GCs.
2061   if (kind != FullGC) {
2062     _mmu_tracker->add_pause(start, end);
2063   }
2064   // Manage the mutator time tracking from initial mark to first mixed gc.
2065   switch (kind) {
2066     case FullGC:
2067       abort_time_to_mixed_tracking();
2068       break;
2069     case Cleanup:
2070     case Remark:
2071     case YoungOnlyGC:
2072     case LastYoungGC:
2073       _initial_mark_to_mixed.add_pause(end - start);
2074       break;
2075     case InitialMarkGC:
2076       _initial_mark_to_mixed.record_initial_mark_end(end);
2077       break;
2078     case MixedGC:
2079       _initial_mark_to_mixed.record_mixed_gc_start(start);
2080       break;
2081     default:
2082       ShouldNotReachHere();
2083   }
2084 }
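
A minimal sketch of the bookkeeping that record_pause() drives; the tracker's real implementation lives elsewhere in this patch, so the internals below are assumptions:

// Approximates mutator time from the end of the initial mark pause to the
// start of the first mixed GC, with intervening pauses subtracted out.
struct InitialMarkToMixedSketch {
  double _initial_mark_end_sec;
  double _paused_sec;
  double _result_sec;

  InitialMarkToMixedSketch() : _initial_mark_end_sec(0.0), _paused_sec(0.0), _result_sec(-1.0) {}

  void record_initial_mark_end(double end_sec) { _initial_mark_end_sec = end_sec; _paused_sec = 0.0; }
  void add_pause(double pause_sec)             { _paused_sec += pause_sec; }
  void record_mixed_gc_start(double start_sec) {
    _result_sec = (start_sec - _initial_mark_end_sec) - _paused_sec;
  }
  void reset()            { _initial_mark_end_sec = 0.0; _paused_sec = 0.0; _result_sec = -1.0; }
  bool has_result() const { return _result_sec > 0.0; }
};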
2085 
2086 void G1CollectorPolicy::abort_time_to_mixed_tracking() {
2087   _initial_mark_to_mixed.reset();
2088 }
2089 
2090 bool G1CollectorPolicy::next_gc_should_be_mixed(const char* true_action_str,
2091                                                 const char* false_action_str) const {
2092   CollectionSetChooser* cset_chooser = _collectionSetChooser;
2093   if (cset_chooser->is_empty()) {
2094     ergo_verbose0(ErgoMixedGCs,
2095                   false_action_str,
2096                   ergo_format_reason("candidate old regions not available"));
2097     return false;
2098   }
2099 
2100   // Is the amount of uncollected reclaimable space above G1HeapWastePercent?
2101   size_t reclaimable_bytes = cset_chooser->remaining_reclaimable_bytes();
2102   double reclaimable_perc = reclaimable_bytes_perc(reclaimable_bytes);
2103   double threshold = (double) G1HeapWastePercent;
2104   if (reclaimable_perc <= threshold) {
2105     ergo_verbose4(ErgoMixedGCs,
2106               false_action_str,
2107               ergo_format_reason("reclaimable percentage not over threshold")

