src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp

  90   _alloc_rate_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  91   _prev_collection_pause_end_ms(0.0),
  92   _rs_length_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
  93   _cost_per_card_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  94   _young_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
  95   _mixed_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
  96   _cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  97   _mixed_cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  98   _cost_per_byte_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  99   _cost_per_byte_ms_during_cm_seq(new TruncatedSeq(TruncatedSeqLength)),
 100   _constant_other_time_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
 101   _young_other_cost_per_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
 102   _non_young_other_cost_per_region_ms_seq(
 103                                          new TruncatedSeq(TruncatedSeqLength)),
 104 
 105   _pending_cards_seq(new TruncatedSeq(TruncatedSeqLength)),
 106   _rs_lengths_seq(new TruncatedSeq(TruncatedSeqLength)),
 107 
 108   _pause_time_target_ms((double) MaxGCPauseMillis),
 109 
 110   _gcs_are_young(true),
 111 
 112   _during_marking(false),
 113   _in_marking_window(false),
 114   _in_marking_window_im(false),
 115 
 116   _recent_prev_end_times_for_all_gcs_sec(
 117                                 new TruncatedSeq(NumPrevPausesForHeuristics)),
 118 
 119   _recent_avg_pause_time_ratio(0.0),
 120 
 121   _initiate_conc_mark_if_possible(false),
 122   _during_initial_mark_pause(false),
 123   _last_young_gc(false),
 124   _last_gc_was_young(false),
 125 
 126   _eden_used_bytes_before_gc(0),
 127   _survivor_used_bytes_before_gc(0),
 128   _heap_used_bytes_before_gc(0),
 129   _metaspace_used_bytes_before_gc(0),
 130   _eden_capacity_bytes_before_gc(0),
 131   _heap_capacity_bytes_before_gc(0),
 132 
 133   _eden_cset_region_length(0),
 134   _survivor_cset_region_length(0),
 135   _old_cset_region_length(0),
 136 
 137   _sigma(G1ConfidencePercent / 100.0),
 138 
 139   _collection_set(NULL),
 140   _collection_set_bytes_used_before(0),
 141 
 142   // Incremental CSet attributes
 143   _inc_cset_build_state(Inactive),
 144   _inc_cset_head(NULL),
 145   _inc_cset_tail(NULL),


 317 void G1CollectorPolicy::initialize_flags() {
 318   if (G1HeapRegionSize != HeapRegion::GrainBytes) {
 319     FLAG_SET_ERGO(size_t, G1HeapRegionSize, HeapRegion::GrainBytes);
 320   }
 321 
 322   if (SurvivorRatio < 1) {
 323     vm_exit_during_initialization("Invalid survivor ratio specified");
 324   }
 325   CollectorPolicy::initialize_flags();
 326   _young_gen_sizer = new G1YoungGenSizer(); // Must be after call to initialize_flags
 327 }
 328 
 329 void G1CollectorPolicy::post_heap_initialize() {
 330   uintx max_regions = G1CollectedHeap::heap()->max_regions();
 331   size_t max_young_size = (size_t)_young_gen_sizer->max_young_length(max_regions) * HeapRegion::GrainBytes;
 332   if (max_young_size != MaxNewSize) {
 333     FLAG_SET_ERGO(size_t, MaxNewSize, max_young_size);
 334   }
 335 }
 336 


 337 G1YoungGenSizer::G1YoungGenSizer() : _sizer_kind(SizerDefaults), _adaptive_size(true),
 338         _min_desired_young_length(0), _max_desired_young_length(0) {
 339   if (FLAG_IS_CMDLINE(NewRatio)) {
 340     if (FLAG_IS_CMDLINE(NewSize) || FLAG_IS_CMDLINE(MaxNewSize)) {
 341       warning("-XX:NewSize and -XX:MaxNewSize override -XX:NewRatio");
 342     } else {
 343       _sizer_kind = SizerNewRatio;
 344       _adaptive_size = false;
 345       return;
 346     }
 347   }
 348 
 349   if (NewSize > MaxNewSize) {
 350     if (FLAG_IS_CMDLINE(MaxNewSize)) {
 351       warning("NewSize (" SIZE_FORMAT "k) is greater than the MaxNewSize (" SIZE_FORMAT "k). "
 352               "A new max generation size of " SIZE_FORMAT "k will be used.",
 353               NewSize/K, MaxNewSize/K, NewSize/K);
 354     }
 355     MaxNewSize = NewSize;
 356   }
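
Taken together, the constructor resolves conflicting sizing flags in a fixed
order: -XX:NewRatio is honored only when neither -XX:NewSize nor -XX:MaxNewSize
was given, and a NewSize larger than MaxNewSize silently raises MaxNewSize,
warning only when MaxNewSize was set explicitly. A minimal standalone sketch of
that clamp, with stand-in variables in place of HotSpot's flag globals and
FLAG_IS_CMDLINE():

    #include <cstddef>
    #include <cstdio>

    // Stand-ins for the HotSpot flag machinery (illustration only).
    static size_t NewSize    = 64 * 1024 * 1024;  // as if -XX:NewSize=64m
    static size_t MaxNewSize = 32 * 1024 * 1024;  // as if -XX:MaxNewSize=32m
    static bool   max_new_size_on_cmdline = true; // FLAG_IS_CMDLINE(MaxNewSize)

    int main() {
      // Mirrors the clamp above: a larger NewSize wins over MaxNewSize.
      if (NewSize > MaxNewSize) {
        if (max_new_size_on_cmdline) {
          std::printf("warning: NewSize (%zuk) > MaxNewSize (%zuk), "
                      "using %zuk as the new max\n",
                      NewSize / 1024, MaxNewSize / 1024, NewSize / 1024);
        }
        MaxNewSize = NewSize;
      }
      std::printf("effective MaxNewSize = %zuk\n", MaxNewSize / 1024);
      return 0;
    }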


 535   uint desired_min_length = calculate_young_list_desired_min_length(base_min_length);
 536   // This is the absolute minimum young length. Ensure that we
 537   // will at least have one eden region available for allocation.
 538   uint absolute_min_length = base_min_length + MAX2(_g1->young_list()->eden_length(), (uint)1);
 539   // If we shrank the young list target it should not shrink below the current size.
 540   desired_min_length = MAX2(desired_min_length, absolute_min_length);
 541   // Calculate the absolute and desired max bounds.
 542 
 543   // We will try our best not to "eat" into the reserve.
 544   uint absolute_max_length = 0;
 545   if (_free_regions_at_end_of_collection > _reserve_regions) {
 546     absolute_max_length = _free_regions_at_end_of_collection - _reserve_regions;
 547   }
 548   uint desired_max_length = calculate_young_list_desired_max_length();
 549   if (desired_max_length > absolute_max_length) {
 550     desired_max_length = absolute_max_length;
 551   }
 552 
 553   uint young_list_target_length = 0;
 554   if (adaptive_young_list_length()) {
 555     if (gcs_are_young()) {
 556       young_list_target_length =
 557                         calculate_young_list_target_length(rs_lengths,
 558                                                            base_min_length,
 559                                                            desired_min_length,
 560                                                            desired_max_length);
 561       _rs_lengths_prediction = rs_lengths;
 562     } else {
 563       // Don't calculate anything and let the code below bound it to
 564       // the desired_min_length, i.e., do the next GC as soon as
 565       // possible to maximize how many old regions we can add to it.
 566     }
 567   } else {
 568     // The user asked for a fixed young gen so we'll fix the young gen
 569     // whether the next GC is young or mixed.
 570     young_list_target_length = _young_list_fixed_length;
 571   }
 572 
 573   // Make sure we don't go over the desired max length, nor under the
 574   // desired min length. In case they clash, desired_min_length wins
 575   // which is why that test is second.


 577     young_list_target_length = desired_max_length;
 578   }
 579   if (young_list_target_length < desired_min_length) {
 580     young_list_target_length = desired_min_length;
 581   }
 582 
 583   assert(young_list_target_length > recorded_survivor_regions(),
 584          "we should be able to allocate at least one eden region");
 585   assert(young_list_target_length >= absolute_min_length, "post-condition");
 586   _young_list_target_length = young_list_target_length;
 587 
 588   update_max_gc_locker_expansion();
 589 }
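
The clamping at the end of this function is deliberately ordered: the target is
first capped at desired_max_length and then raised to desired_min_length, so
the minimum wins whenever the two bounds cross, as the comment notes. A small
standalone sketch of that order-dependent clamp (values are illustrative):

    #include <cstdio>
    typedef unsigned int uint;

    // Applying the min bound second means it wins when min > max,
    // matching the comment in the code above.
    static uint bound_target(uint target, uint desired_min, uint desired_max) {
      if (target > desired_max) { target = desired_max; }
      if (target < desired_min) { target = desired_min; }
      return target;
    }

    int main() {
      // Crossed bounds: max 5 but min 10 -> the result is 10.
      std::printf("%u\n", bound_target(20, 10, 5));
      return 0;
    }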
 590 
 591 uint
 592 G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths,
 593                                                      uint base_min_length,
 594                                                      uint desired_min_length,
 595                                                      uint desired_max_length) {
 596   assert(adaptive_young_list_length(), "pre-condition");
 597   assert(gcs_are_young(), "only call this for young GCs");
 598 
 599   // In case some edge-condition makes the desired max length too small...
 600   if (desired_max_length <= desired_min_length) {
 601     return desired_min_length;
 602   }
 603 
 604   // We'll adjust min_young_length and max_young_length not to include
 605   // the already allocated young regions (i.e., so they reflect the
 606   // min and max eden regions we'll allocate). The base_min_length
 607   // will be reflected in the predictions by the
 608   // survivor_regions_evac_time prediction.
 609   assert(desired_min_length > base_min_length, "invariant");
 610   uint min_young_length = desired_min_length - base_min_length;
 611   assert(desired_max_length > base_min_length, "invariant");
 612   uint max_young_length = desired_max_length - base_min_length;
 613 
 614   double target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
 615   double survivor_regions_evac_time = predict_survivor_regions_evac_time();
 616   size_t pending_cards = (size_t) get_new_prediction(_pending_cards_seq);
 617   size_t adj_rs_lengths = rs_lengths + predict_rs_length_diff();


 680                               base_free_regions, target_pause_time_ms),
 681              "min_young_length, the result of the binary search, should "
 682              "fit into the pause target");
 683       assert(!predict_will_fit(min_young_length + 1, base_time_ms,
 684                                base_free_regions, target_pause_time_ms),
 685              "min_young_length, the result of the binary search, should be "
 686              "optimal, so no larger length should fit into the pause target");
 687     }
 688   } else {
 689     // Even the minimum length doesn't fit into the pause time
 690     // target, return it as the result nevertheless.
 691   }
 692   return base_min_length + min_young_length;
 693 }
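
The asserts above state the post-condition of the (elided) binary search over
eden lengths: the returned min_young_length fits the pause target while
min_young_length + 1 does not. A standalone sketch of a search with that
shape, assuming the fit predicate is monotone in the length; a toy predicate
stands in for predict_will_fit():

    #include <cassert>
    typedef unsigned int uint;

    // Toy stand-in for predict_will_fit(): true for lengths up to 37.
    static bool fits(uint young_length) { return young_length <= 37; }

    // Largest length that still fits, assuming fits(min_len) holds and
    // the predicate is monotonically decreasing in the length.
    static uint largest_fitting(uint min_len, uint max_len) {
      while (min_len < max_len) {
        uint mid = min_len + (max_len - min_len + 1) / 2;  // round up
        if (fits(mid)) { min_len = mid; } else { max_len = mid - 1; }
      }
      return min_len;
    }

    int main() {
      uint result = largest_fitting(1, 100);
      assert(fits(result) && !fits(result + 1));  // the post-condition above
      return 0;
    }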
 694 
 695 double G1CollectorPolicy::predict_survivor_regions_evac_time() {
 696   double survivor_regions_evac_time = 0.0;
 697   for (HeapRegion * r = _recorded_survivor_head;
 698        r != NULL && r != _recorded_survivor_tail->get_next_young_region();
 699        r = r->get_next_young_region()) {
 700     survivor_regions_evac_time += predict_region_elapsed_time_ms(r, gcs_are_young());
 701   }
 702   return survivor_regions_evac_time;
 703 }
 704 
 705 void G1CollectorPolicy::revise_young_list_target_length_if_necessary() {
 706   guarantee( adaptive_young_list_length(), "should not call this otherwise" );
 707 
 708   size_t rs_lengths = _g1->young_list()->sampled_rs_lengths();
 709   if (rs_lengths > _rs_lengths_prediction) {
 710     // add 10% to avoid having to recalculate often
 711     size_t rs_lengths_prediction = rs_lengths * 1100 / 1000;
 712     update_young_list_target_length(rs_lengths_prediction);
 713   }
 714 }
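
The 10% pad is integer arithmetic (* 1100 / 1000, truncating), so the target
only has to be recomputed when the sampled RSet lengths outgrow the padded
prediction rather than on every small fluctuation. For example:

    #include <cstddef>
    #include <cstdio>

    int main() {
      // 12345 * 1.1 = 13579.5; the integer form truncates to 13579.
      size_t rs_lengths = 12345;
      std::printf("%zu\n", rs_lengths * 1100 / 1000);
      return 0;
    }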
 715 
 716 
 717 
 718 HeapWord* G1CollectorPolicy::mem_allocate_work(size_t size,
 719                                                bool is_tlab,
 720                                                bool* gc_overhead_limit_was_exceeded) {


 765         ret = false;
 766       }
 767 
 768       if (age <= prev_age) {
 769         gclog_or_tty->print_cr("## %s: region ages are not strictly increasing "
 770                                "(%d, %d)", name, age, prev_age);
 771         ret = false;
 772       }
 773       prev_age = age;
 774     }
 775   }
 776 
 777   return ret;
 778 }
 779 #endif // PRODUCT
 780 
 781 void G1CollectorPolicy::record_full_collection_start() {
 782   _full_collection_start_sec = os::elapsedTime();
 783   record_heap_size_info_at_start(true /* full */);
 784   // Release the future to-space so that it is available for compaction into.
 785   _g1->set_full_collection();
 786 }
 787 
 788 void G1CollectorPolicy::record_full_collection_end() {
 789   // Consider this like a collection pause for the purposes of allocation
 790   // since last pause.
 791   double end_sec = os::elapsedTime();
 792   double full_gc_time_sec = end_sec - _full_collection_start_sec;
 793   double full_gc_time_ms = full_gc_time_sec * 1000.0;
 794 
 795   _trace_old_gen_time_data.record_full_collection(full_gc_time_ms);
 796 
 797   update_recent_gc_times(end_sec, full_gc_time_ms);
 798 
 799   _g1->clear_full_collection();
 800 
 801   // "Nuke" the heuristics that control the young/mixed GC
 802   // transitions and make sure we start with young GCs after the Full GC.
 803   set_gcs_are_young(true);
 804   _last_young_gc = false;
 805   clear_initiate_conc_mark_if_possible();
 806   clear_during_initial_mark_pause();
 807   _in_marking_window = false;
 808   _in_marking_window_im = false;
 809 
 810   _short_lived_surv_rate_group->start_adding_regions();
 811   // also call this on any additional surv rate groups
 812 
 813   record_survivor_regions(0, NULL, NULL);
 814 
 815   _free_regions_at_end_of_collection = _g1->num_free_regions();
 816   // Reset survivors SurvRateGroup.
 817   _survivor_surv_rate_group->reset();
 818   update_young_list_target_length();
 819   _collectionSetChooser->clear();
 820 }
 821 
 822 void G1CollectorPolicy::record_stop_world_start() {
 823   _stop_world_start = os::elapsedTime();
 824 }
 825 
 826 void G1CollectorPolicy::record_collection_pause_start(double start_time_sec) {
 827   // We only need to do this here as the policy will only be applied
 828   // to the GC we're about to start. So, there is no point in calculating
 829   // this every time we calculate / recalculate the target young length.
 830   update_survivors_policy();
 831 
 832   assert(_g1->used() == _g1->recalculate_used(),
 833          err_msg("sanity, used: " SIZE_FORMAT " recalculate_used: " SIZE_FORMAT,
 834                  _g1->used(), _g1->recalculate_used()));
 835 
 836   double s_w_t_ms = (start_time_sec - _stop_world_start) * 1000.0;
 837   _trace_young_gen_time_data.record_start_collection(s_w_t_ms);
 838   _stop_world_start = 0.0;
 839 
 840   record_heap_size_info_at_start(false /* full */);
 841 
 842   phase_times()->record_cur_collection_start_sec(start_time_sec);
 843   _pending_cards = _g1->pending_card_num();
 844 
 845   _collection_set_bytes_used_before = 0;
 846   _bytes_copied_during_gc = 0;
 847 
 848   _last_gc_was_young = false;
 849 
 850   // do that for any other surv rate groups
 851   _short_lived_surv_rate_group->stop_adding_regions();
 852   _survivors_age_table.clear();
 853 
 854   assert( verify_young_ages(), "region age verification" );
 855 }
 856 
 857 void G1CollectorPolicy::record_concurrent_mark_init_end(double
 858                                                    mark_init_elapsed_time_ms) {
 859   _during_marking = true;
 860   assert(!initiate_conc_mark_if_possible(), "we should have cleared it by now");
 861   clear_during_initial_mark_pause();
 862   _cur_mark_stop_world_time_ms = mark_init_elapsed_time_ms;
 863 }
 864 
 865 void G1CollectorPolicy::record_concurrent_mark_remark_start() {
 866   _mark_remark_start_sec = os::elapsedTime();
 867   _during_marking = false;
 868 }
 869 
 870 void G1CollectorPolicy::record_concurrent_mark_remark_end() {
 871   double end_time_sec = os::elapsedTime();
 872   double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec)*1000.0;
 873   _concurrent_mark_remark_times_ms->add(elapsed_time_ms);
 874   _cur_mark_stop_world_time_ms += elapsed_time_ms;
 875   _prev_collection_pause_end_ms += elapsed_time_ms;
 876 
 877   _mmu_tracker->add_pause(_mark_remark_start_sec, end_time_sec, true);
 878 }
 879 
 880 void G1CollectorPolicy::record_concurrent_mark_cleanup_start() {
 881   _mark_cleanup_start_sec = os::elapsedTime();
 882 }
 883 
 884 void G1CollectorPolicy::record_concurrent_mark_cleanup_completed() {
 885   _last_young_gc = true;
 886   _in_marking_window = false;
 887 }
 888 
 889 void G1CollectorPolicy::record_concurrent_pause() {
 890   if (_stop_world_start > 0.0) {
 891     double yield_ms = (os::elapsedTime() - _stop_world_start) * 1000.0;
 892     _trace_young_gen_time_data.record_yield_time(yield_ms);
 893   }
 894 }
 895 
 896 bool G1CollectorPolicy::need_to_start_conc_mark(const char* source, size_t alloc_word_size) {
 897   if (_g1->concurrent_mark()->cmThread()->during_cycle()) {
 898     return false;
 899   }
 900 
 901   size_t marking_initiating_used_threshold =
 902     (_g1->capacity() / 100) * InitiatingHeapOccupancyPercent;
 903   size_t cur_used_bytes = _g1->non_young_capacity_bytes();
 904   size_t alloc_byte_size = alloc_word_size * HeapWordSize;
 905 
 906   if ((cur_used_bytes + alloc_byte_size) > marking_initiating_used_threshold) {
 907     if (gcs_are_young() && !_last_young_gc) {
 908       ergo_verbose5(ErgoConcCycles,
 909         "request concurrent cycle initiation",
 910         ergo_format_reason("occupancy higher than threshold")
 911         ergo_format_byte("occupancy")
 912         ergo_format_byte("allocation request")
 913         ergo_format_byte_perc("threshold")
 914         ergo_format_str("source"),
 915         cur_used_bytes,
 916         alloc_byte_size,
 917         marking_initiating_used_threshold,
 918         (double) InitiatingHeapOccupancyPercent,
 919         source);
 920       return true;
 921     } else {
 922       ergo_verbose5(ErgoConcCycles,
 923         "do not request concurrent cycle initiation",
 924         ergo_format_reason("still doing mixed collections")
 925         ergo_format_byte("occupancy")
 926         ergo_format_byte("allocation request")
 927         ergo_format_byte_perc("threshold")

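
The initiation test above is plain integer math: the threshold is
(capacity / 100) * InitiatingHeapOccupancyPercent, compared against the
non-young occupancy plus the pending allocation. A worked standalone example
with illustrative numbers (a 1 GB heap and the default IHOP of 45):

    #include <cstddef>
    #include <cstdio>

    int main() {
      size_t capacity  = 1024 * 1024 * 1024;     // 1 GB heap
      size_t threshold = (capacity / 100) * 45;  // ~460.8 MB
      size_t cur_used  = 450u * 1024 * 1024;     // non-young occupancy
      size_t alloc     = 16u * 1024 * 1024;      // pending allocation
      // Marking is requested once occupancy plus the allocation crosses
      // the threshold: 466 MB > ~460.8 MB here.
      std::printf("start marking: %s\n",
                  (cur_used + alloc) > threshold ? "yes" : "no");
      return 0;
    }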

 942 
 943 void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, EvacuationInfo& evacuation_info) {
 944   double end_time_sec = os::elapsedTime();
 945   assert(_cur_collection_pause_used_regions_at_start >= cset_region_length(),
 946          "otherwise, the subtraction below does not make sense");
 947   size_t rs_size =
 948             _cur_collection_pause_used_regions_at_start - cset_region_length();
 949   size_t cur_used_bytes = _g1->used();
 950   assert(cur_used_bytes == _g1->recalculate_used(), "It should!");
 951   bool last_pause_included_initial_mark = false;
 952   bool update_stats = !_g1->evacuation_failed();
 953 
 954 #ifndef PRODUCT
 955   if (G1YoungSurvRateVerbose) {
 956     gclog_or_tty->cr();
 957     _short_lived_surv_rate_group->print();
 958     // do that for any other surv rate groups too
 959   }
 960 #endif // PRODUCT
 961 
 962   last_pause_included_initial_mark = during_initial_mark_pause();
 963   if (last_pause_included_initial_mark) {
 964     record_concurrent_mark_init_end(0.0);
 965   } else if (need_to_start_conc_mark("end of GC")) {
 966     // Note: this might have already been set, if during the last
 967     // pause we decided to start a cycle but at the beginning of
 968     // this pause we decided to postpone it. That's OK.
 969     set_initiate_conc_mark_if_possible();
 970   }
 971 
 972   _mmu_tracker->add_pause(end_time_sec - pause_time_ms/1000.0,
 973                           end_time_sec, false);
 974 
 975   evacuation_info.set_collectionset_used_before(_collection_set_bytes_used_before);
 976   evacuation_info.set_bytes_copied(_bytes_copied_during_gc);
 977 
 978   if (update_stats) {
 979     _trace_young_gen_time_data.record_end_collection(pause_time_ms, phase_times());
 980     // this is where we update the allocation rate of the application
 981     double app_time_ms =
 982       (phase_times()->cur_collection_start_sec() * 1000.0 - _prev_collection_pause_end_ms);
 983     if (app_time_ms < MIN_TIMER_GRANULARITY) {
 984       // This usually happens due to the timer not having the required
 985       // granularity. Some Linuxes are the usual culprits.
 986       // We'll just set it to something (arbitrarily) small.
 987       app_time_ms = 1.0;
 988     }
 989     // We maintain the invariant that all objects allocated by mutator


1011       gclog_or_tty->print_cr("Recent GC Times (ms):");
1012       _recent_gc_times_ms->dump();
1013       gclog_or_tty->print_cr("(End Time=%3.3f) Recent GC End Times (s):", end_time_sec);
1014       _recent_prev_end_times_for_all_gcs_sec->dump();
1015       gclog_or_tty->print_cr("GC = %3.3f, Interval = %3.3f, Ratio = %3.3f",
1016                              _recent_gc_times_ms->sum(), interval_ms, recent_avg_pause_time_ratio());
1017       // In debug mode, terminate the JVM if the user wants to debug at this point.
1018       assert(!G1FailOnFPError, "Debugging data for CR 6898948 has been dumped above");
1019 #endif  // !PRODUCT
1020       // Clip ratio between 0.0 and 1.0, and continue. This will be fixed in
1021       // CR 6902692 by redoing the manner in which the ratio is incrementally computed.
1022       if (_recent_avg_pause_time_ratio < 0.0) {
1023         _recent_avg_pause_time_ratio = 0.0;
1024       } else {
1025         assert(_recent_avg_pause_time_ratio - 1.0 > 0.0, "Ctl-point invariant");
1026         _recent_avg_pause_time_ratio = 1.0;
1027       }
1028     }
1029   }
1030 
1031   bool new_in_marking_window = _in_marking_window;
1032   bool new_in_marking_window_im = false;
1033   if (last_pause_included_initial_mark) {
1034     new_in_marking_window = true;
1035     new_in_marking_window_im = true;
1036   }
1037 
1038   if (_last_young_gc) {
1039   // This is supposed to be the "last young GC" before we start
1040     // doing mixed GCs. Here we decide whether to start mixed GCs or not.
1041 
1042     if (!last_pause_included_initial_mark) {
1043       if (next_gc_should_be_mixed("start mixed GCs",
1044                                   "do not start mixed GCs")) {
1045         set_gcs_are_young(false);
1046       }
1047     } else {
1048       ergo_verbose0(ErgoMixedGCs,
1049                     "do not start mixed GCs",
1050                     ergo_format_reason("concurrent cycle is about to start"));
1051     }
1052     _last_young_gc = false;
1053   }
1054 
1055   if (!_last_gc_was_young) {
1056     // This is a mixed GC. Here we decide whether to continue doing
1057     // mixed GCs or not.
1058 
1059     if (!next_gc_should_be_mixed("continue mixed GCs",
1060                                  "do not continue mixed GCs")) {
1061       set_gcs_are_young(true);
1062     }
1063   }
1064 
1065   _short_lived_surv_rate_group->start_adding_regions();
1066   // Do that for any other surv rate groups
1067 
1068   if (update_stats) {
1069     double cost_per_card_ms = 0.0;
1070     if (_pending_cards > 0) {
1071       cost_per_card_ms = phase_times()->average_time_ms(G1GCPhaseTimes::UpdateRS) / (double) _pending_cards;
1072       _cost_per_card_ms_seq->add(cost_per_card_ms);
1073     }
1074 
1075     size_t cards_scanned = _g1->cards_scanned();
1076 
1077     double cost_per_entry_ms = 0.0;
1078     if (cards_scanned > 10) {
1079       cost_per_entry_ms = phase_times()->average_time_ms(G1GCPhaseTimes::ScanRS) / (double) cards_scanned;
1080       if (_last_gc_was_young) {
1081         _cost_per_entry_ms_seq->add(cost_per_entry_ms);
1082       } else {
1083         _mixed_cost_per_entry_ms_seq->add(cost_per_entry_ms);
1084       }
1085     }
1086 
1087     if (_max_rs_lengths > 0) {
1088       double cards_per_entry_ratio =
1089         (double) cards_scanned / (double) _max_rs_lengths;
1090       if (_last_gc_was_young) {
1091         _young_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
1092       } else {
1093         _mixed_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
1094       }
1095     }
1096 
1097     // This is defensive. For a while _max_rs_lengths could get
1098     // smaller than _recorded_rs_lengths which was causing
1099     // rs_length_diff to get very large and mess up the RSet length
1100     // predictions. The reason was unsafe concurrent updates to the
1101     // _inc_cset_recorded_rs_lengths field which the code below guards
1102     // against (see CR 7118202). This bug has now been fixed (see CR
1103     // 7119027). However, I'm still worried that
1104     // _inc_cset_recorded_rs_lengths might still end up somewhat
1105     // inaccurate. The concurrent refinement thread calculates an
1106     // RSet's length concurrently with other CR threads updating it
1107     // which might cause it to calculate the length incorrectly (if,
1108     // say, it's in mid-coarsening). So I'll leave in the defensive
1109     // conditional below just in case.
1110     size_t rs_length_diff = 0;
1111     if (_max_rs_lengths > _recorded_rs_lengths) {
1112       rs_length_diff = _max_rs_lengths - _recorded_rs_lengths;
1113     }
1114     _rs_length_diff_seq->add((double) rs_length_diff);
1115 
1116     size_t freed_bytes = _heap_used_bytes_before_gc - cur_used_bytes;
1117     size_t copied_bytes = _collection_set_bytes_used_before - freed_bytes;
1118     double cost_per_byte_ms = 0.0;
1119 
1120     if (copied_bytes > 0) {
1121       cost_per_byte_ms = phase_times()->average_time_ms(G1GCPhaseTimes::ObjCopy) / (double) copied_bytes;
1122       if (_in_marking_window) {
1123         _cost_per_byte_ms_during_cm_seq->add(cost_per_byte_ms);
1124       } else {
1125         _cost_per_byte_ms_seq->add(cost_per_byte_ms);
1126       }
1127     }
1128 
1129     double all_other_time_ms = pause_time_ms -
1130       (phase_times()->average_time_ms(G1GCPhaseTimes::UpdateRS) + phase_times()->average_time_ms(G1GCPhaseTimes::ScanRS) +
1131           phase_times()->average_time_ms(G1GCPhaseTimes::ObjCopy) + phase_times()->average_time_ms(G1GCPhaseTimes::Termination));
1132 
1133     double young_other_time_ms = 0.0;
1134     if (young_cset_region_length() > 0) {
1135       young_other_time_ms =
1136         phase_times()->young_cset_choice_time_ms() +
1137         phase_times()->young_free_cset_time_ms();
1138       _young_other_cost_per_region_ms_seq->add(young_other_time_ms /
1139                                           (double) young_cset_region_length());
1140     }
1141     double non_young_other_time_ms = 0.0;
1142     if (old_cset_region_length() > 0) {


1145         phase_times()->non_young_free_cset_time_ms();
1146 
1147       _non_young_other_cost_per_region_ms_seq->add(non_young_other_time_ms /
1148                                             (double) old_cset_region_length());
1149     }
1150 
1151     double constant_other_time_ms = all_other_time_ms -
1152       (young_other_time_ms + non_young_other_time_ms);
1153     _constant_other_time_ms_seq->add(constant_other_time_ms);
1154 
1155     double survival_ratio = 0.0;
1156     if (_collection_set_bytes_used_before > 0) {
1157       survival_ratio = (double) _bytes_copied_during_gc /
1158                                    (double) _collection_set_bytes_used_before;
1159     }
1160 
1161     _pending_cards_seq->add((double) _pending_cards);
1162     _rs_lengths_seq->add((double) _max_rs_lengths);
1163   }
1164 
1165   _in_marking_window = new_in_marking_window;
1166   _in_marking_window_im = new_in_marking_window_im;
1167   _free_regions_at_end_of_collection = _g1->num_free_regions();
1168   update_young_list_target_length();
1169 
1170   // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
1171   double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
1172   adjust_concurrent_refinement(phase_times()->average_time_ms(G1GCPhaseTimes::UpdateRS),
1173                                phase_times()->sum_thread_work_items(G1GCPhaseTimes::UpdateRS), update_rs_time_goal_ms);
1174 
1175   _collectionSetChooser->verify();
1176 }
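
Everything recorded in the update_stats block above lands in a TruncatedSeq
(cost per card, per scanned entry, per copied byte, and so on), and later
pauses are predicted from those sequences. A toy standalone analogue of that
estimator style; the real TruncatedSeq additionally tracks variance and
decayed averages, so this shows only the flavor of the mechanism:

    #include <cstdio>

    // Toy fixed-length sequence: plain average of the last 10 samples.
    class ToySeq {
      double _vals[10];
      int _num, _next;
    public:
      ToySeq() : _num(0), _next(0) {}
      void add(double v) {
        _vals[_next] = v;
        _next = (_next + 1) % 10;
        if (_num < 10) { _num++; }
      }
      double avg() const {
        double sum = 0.0;
        for (int i = 0; i < _num; i++) { sum += _vals[i]; }
        return _num == 0 ? 0.0 : sum / _num;
      }
    };

    int main() {
      ToySeq cost_per_card_ms;
      cost_per_card_ms.add(0.002);  // e.g. UpdateRS time / pending cards
      cost_per_card_ms.add(0.003);
      // Predict the next pause's update-RS cost from recent history.
      std::printf("predicted: %f ms/card\n", cost_per_card_ms.avg());
      return 0;
    }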
1177 
1178 #define EXT_SIZE_FORMAT "%.1f%s"
1179 #define EXT_SIZE_PARAMS(bytes)                                  \
1180   byte_size_in_proper_unit((double)(bytes)),                    \
1181   proper_unit_for_byte_size((bytes))
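
Usage note for the macro pair: EXT_SIZE_FORMAT contributes a "%.1f%s"
specifier and EXT_SIZE_PARAMS the matching (value, unit) argument pair, so the
two must always appear together in a call. A self-contained illustration with
simplified stand-ins for HotSpot's byte_size_in_proper_unit() and
proper_unit_for_byte_size():

    #include <cstddef>
    #include <cstdio>

    // Simplified stand-ins for the HotSpot helpers (K/M only).
    static double in_proper_unit(double bytes) {
      return bytes >= 1024.0 * 1024.0 ? bytes / (1024.0 * 1024.0)
                                      : bytes / 1024.0;
    }
    static const char* unit_for(double bytes) {
      return bytes >= 1024.0 * 1024.0 ? "M" : "K";
    }

    #define EXT_SIZE_FORMAT "%.1f%s"
    #define EXT_SIZE_PARAMS(bytes) \
      in_proper_unit((double)(bytes)), unit_for((double)(bytes))

    int main() {
      size_t eden_bytes = 25165824;  // 24 MB
      std::printf("Eden: " EXT_SIZE_FORMAT "\n", EXT_SIZE_PARAMS(eden_bytes));
      return 0;  // prints "Eden: 24.0M"
    }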
1182 
1183 void G1CollectorPolicy::record_heap_size_info_at_start(bool full) {
1184   YoungList* young_list = _g1->young_list();
1185   _eden_used_bytes_before_gc = young_list->eden_used_bytes();
1186   _survivor_used_bytes_before_gc = young_list->survivor_used_bytes();


1284     dcqs.set_completed_queue_padding(curr_queue_size);
1285   } else {
1286     dcqs.set_completed_queue_padding(0);
1287   }
1288   dcqs.notify_if_necessary();
1289 }
1290 
1291 double
1292 G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards,
1293                                                 size_t scanned_cards) {
1294   return
1295     predict_rs_update_time_ms(pending_cards) +
1296     predict_rs_scan_time_ms(scanned_cards) +
1297     predict_constant_other_time_ms();
1298 }
1299 
1300 double
1301 G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards) {
1302   size_t rs_length = predict_rs_length_diff();
1303   size_t card_num;
1304   if (gcs_are_young()) {
1305     card_num = predict_young_card_num(rs_length);
1306   } else {
1307     card_num = predict_non_young_card_num(rs_length);
1308   }
1309   return predict_base_elapsed_time_ms(pending_cards, card_num);
1310 }
1311 
1312 size_t G1CollectorPolicy::predict_bytes_to_copy(HeapRegion* hr) {
1313   size_t bytes_to_copy;
1314   if (hr->is_marked())
1315     bytes_to_copy = hr->max_live_bytes();
1316   else {
1317     assert(hr->is_young() && hr->age_in_surv_rate_group() != -1, "invariant");
1318     int age = hr->age_in_surv_rate_group();
1319     double yg_surv_rate = predict_yg_surv_rate(age, hr->surv_rate_group());
1320     bytes_to_copy = (size_t) ((double) hr->used() * yg_surv_rate);
1321   }
1322   return bytes_to_copy;
1323 }
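
For an unmarked region the copy estimate is simply used() scaled by the
predicted age-specific survival rate. A worked standalone example with
made-up numbers (the survival rate stands in for predict_yg_surv_rate()):

    #include <cstddef>
    #include <cstdio>

    int main() {
      size_t used_bytes   = 8u * 1024 * 1024;  // region occupancy: 8 MB
      double yg_surv_rate = 0.30;              // predicted 30% survival
      size_t bytes_to_copy = (size_t)((double)used_bytes * yg_surv_rate);
      std::printf("%zu bytes expected to be copied\n", bytes_to_copy);  // ~2.4 MB
      return 0;
    }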
1324 


1450 void G1CollectorPolicy::update_survivors_policy() {
1451   double max_survivor_regions_d =
1452                  (double) _young_list_target_length / (double) SurvivorRatio;
1453   // We use ceiling so that if max_survivor_regions_d is > 0.0 (but
1454   // smaller than 1.0) we'll get 1.
1455   _max_survivor_regions = (uint) ceil(max_survivor_regions_d);
1456 
1457   _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold(
1458         HeapRegion::GrainWords * _max_survivor_regions, counters());
1459 }
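
Worked example of the ceiling above: with a young target of 20 regions and the
default -XX:SurvivorRatio=8, ceil(20 / 8.0) = 3 survivor regions, and the
ceiling also guarantees at least one region whenever the quotient is positive
but below 1.0 (values are illustrative):

    #include <cmath>
    #include <cstdio>

    int main() {
      unsigned young_target = 20, survivor_ratio = 8;
      unsigned max_survivor_regions =
          (unsigned)std::ceil((double)young_target / (double)survivor_ratio);
      std::printf("max survivor regions: %u\n", max_survivor_regions);  // 3
      return 0;
    }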
1460 
1461 bool G1CollectorPolicy::force_initial_mark_if_outside_cycle(
1462                                                      GCCause::Cause gc_cause) {
1463   bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
1464   if (!during_cycle) {
1465     ergo_verbose1(ErgoConcCycles,
1466                   "request concurrent cycle initiation",
1467                   ergo_format_reason("requested by GC cause")
1468                   ergo_format_str("GC cause"),
1469                   GCCause::to_string(gc_cause));
1470     set_initiate_conc_mark_if_possible();
1471     return true;
1472   } else {
1473     ergo_verbose1(ErgoConcCycles,
1474                   "do not request concurrent cycle initiation",
1475                   ergo_format_reason("concurrent cycle already in progress")
1476                   ergo_format_str("GC cause"),
1477                   GCCause::to_string(gc_cause));
1478     return false;
1479   }
1480 }
1481 
1482 void
1483 G1CollectorPolicy::decide_on_conc_mark_initiation() {
1484   // We are about to decide on whether this pause will be an
1485   // initial-mark pause.
1486 
1487   // First, during_initial_mark_pause() should not be already set. We
1488   // will set it here if we have to. However, it should be cleared by
1489   // the end of the pause (it's only set for the duration of an
1490   // initial-mark pause).
1491   assert(!during_initial_mark_pause(), "pre-condition");
1492 
1493   if (initiate_conc_mark_if_possible()) {
1494     // We had noticed on a previous pause that the heap occupancy has
1495     // gone over the initiating threshold and we should start a
1496     // concurrent marking cycle. So we might initiate one.
1497 
1498     bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
1499     if (!during_cycle) {
1500       // The concurrent marking thread is not "during a cycle", i.e.,
1501       // it has completed the last one. So we can go ahead and
1502       // initiate a new cycle.
1503 
1504       set_during_initial_mark_pause();
1505       // We do not allow mixed GCs during marking.
1506       if (!gcs_are_young()) {
1507         set_gcs_are_young(true);
1508         ergo_verbose0(ErgoMixedGCs,
1509                       "end mixed GCs",
1510                       ergo_format_reason("concurrent cycle is about to start"));
1511       }
1512 
1513       // And we can now clear initiate_conc_mark_if_possible() as
1514       // we've already acted on it.
1515       clear_initiate_conc_mark_if_possible();
1516 
1517       ergo_verbose0(ErgoConcCycles,
1518                   "initiate concurrent cycle",
1519                   ergo_format_reason("concurrent cycle initiation requested"));
1520     } else {
1521       // The concurrent marking thread is still finishing up the
1522       // previous cycle. If we start one right now the two cycles
1523       // overlap. In particular, the concurrent marking thread might
1524       // be in the process of clearing the next marking bitmap (which
1525       // we will use for the next cycle if we start one). Starting a
1526       // cycle now will be bad given that parts of the marking
1527       // information might get cleared by the marking thread. And we
1528       // cannot wait for the marking thread to finish the cycle as it
1529       // periodically yields while clearing the next marking bitmap
1530       // and, if it's in a yield point, it's waiting for us to
1531       // finish. So, at this point we will not start a cycle and we'll
1532       // let the concurrent marking thread complete the last one.
1533       ergo_verbose0(ErgoConcCycles,
1534                     "do not initiate concurrent cycle",
1535                     ergo_format_reason("concurrent cycle already in progress"));


1666   _inc_cset_predicted_elapsed_time_ms +=
1667                                      _inc_cset_predicted_elapsed_time_ms_diffs;
1668 
1669   _inc_cset_recorded_rs_lengths_diffs = 0;
1670   _inc_cset_predicted_elapsed_time_ms_diffs = 0.0;
1671 }
1672 
1673 void G1CollectorPolicy::add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length) {
1674   // This routine is used when:
1675   // * adding survivor regions to the incremental cset at the end of an
1676   //   evacuation pause,
1677   // * adding the current allocation region to the incremental cset
1678   //   when it is retired, and
1679   // * updating existing policy information for a region in the
1680   //   incremental cset via young list RSet sampling.
1681   // Therefore this routine may be called at a safepoint by the
1682   // VM thread, or in-between safepoints by mutator threads (when
1683   // retiring the current allocation region) or a concurrent
1684   // refine thread (RSet sampling).
1685 
1686   double region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, gcs_are_young());
1687   size_t used_bytes = hr->used();
1688   _inc_cset_recorded_rs_lengths += rs_length;
1689   _inc_cset_predicted_elapsed_time_ms += region_elapsed_time_ms;
1690   _inc_cset_bytes_used_before += used_bytes;
1691 
1692   // Cache the values we have added to the aggregated information
1693   // in the heap region in case we have to remove this region from
1694   // the incremental collection set, or it is updated by the
1695   // rset sampling code
1696   hr->set_recorded_rs_length(rs_length);
1697   hr->set_predicted_elapsed_time_ms(region_elapsed_time_ms);
1698 }
1699 
1700 void G1CollectorPolicy::update_incremental_cset_info(HeapRegion* hr,
1701                                                      size_t new_rs_length) {
1702   // Update the CSet information that is dependent on the new RS length
1703   assert(hr->is_young(), "Precondition");
1704   assert(!SafepointSynchronize::is_at_safepoint(),
1705                                                "should not be at a safepoint");
1706 
1707   // We could have updated _inc_cset_recorded_rs_lengths and
1708   // _inc_cset_predicted_elapsed_time_ms directly but we'd need to do
1709   // that atomically, as this code is executed by a concurrent
1710   // refinement thread, potentially concurrently with a mutator thread
1711   // allocating a new region and also updating the same fields. To
1712   // avoid the atomic operations we accumulate these updates on two
1713   // separate fields (*_diffs) and we'll just add them to the "main"
1714   // fields at the start of a GC.
1715 
1716   ssize_t old_rs_length = (ssize_t) hr->recorded_rs_length();
1717   ssize_t rs_lengths_diff = (ssize_t) new_rs_length - old_rs_length;
1718   _inc_cset_recorded_rs_lengths_diffs += rs_lengths_diff;
1719 
1720   double old_elapsed_time_ms = hr->predicted_elapsed_time_ms();
1721   double new_region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, gcs_are_young());
1722   double elapsed_ms_diff = new_region_elapsed_time_ms - old_elapsed_time_ms;
1723   _inc_cset_predicted_elapsed_time_ms_diffs += elapsed_ms_diff;
1724 
1725   hr->set_recorded_rs_length(new_rs_length);
1726   hr->set_predicted_elapsed_time_ms(new_region_elapsed_time_ms);
1727 }
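
As the comment explains, concurrent updates accumulate into the separate
*_diffs fields and are folded into the main fields only at the start of a GC,
avoiding atomic updates on the hot path. A standalone sketch of that hand-off
pattern (names are illustrative, not the HotSpot fields):

    #include <cstddef>
    #include <cstdio>

    struct CSetStatsSketch {
      size_t         recorded_rs_lengths;        // read/written at safepoints
      std::ptrdiff_t recorded_rs_lengths_diffs;  // written between safepoints

      CSetStatsSketch() : recorded_rs_lengths(0), recorded_rs_lengths_diffs(0) {}

      // E.g. a refinement thread re-samples a region's RSet length.
      void update_region(size_t old_len, size_t new_len) {
        recorded_rs_lengths_diffs +=
            (std::ptrdiff_t)new_len - (std::ptrdiff_t)old_len;
      }
      // Called inside the safepoint at the start of a GC.
      void fold_at_safepoint() {
        recorded_rs_lengths += recorded_rs_lengths_diffs;
        recorded_rs_lengths_diffs = 0;
      }
    };

    int main() {
      CSetStatsSketch s;
      s.recorded_rs_lengths = 100;
      s.update_region(10, 25);  // region's recorded length grew by 15
      s.fold_at_safepoint();
      std::printf("%zu\n", s.recorded_rs_lengths);  // prints 115
      return 0;
    }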
1728 
1729 void G1CollectorPolicy::add_region_to_incremental_cset_common(HeapRegion* hr) {
1730   assert(hr->is_young(), "invariant");
1731   assert(hr->young_index_in_cset() > -1, "should have already been set");
1732   assert(_inc_cset_build_state == Active, "Precondition");
1733 
1734   // We need to clear and set the cached recorded/cached collection set
1735   // information in the heap region here (before the region gets added
1736   // to the collection set). An individual heap region's cached values
1737   // are calculated, aggregated with the policy collection set info,
1738   // and cached in the heap region here (initially) and (subsequently)
1739   // by the Young List sampling code.
1740 
1741   size_t rs_length = hr->rem_set()->occupied();
1742   add_to_incremental_cset_info(hr, rs_length);
1743 
1744   HeapWord* hr_end = hr->end();
1745   _inc_cset_max_finger = MAX2(_inc_cset_max_finger, hr_end);
1746 
1747   assert(!hr->in_collection_set(), "invariant");
1748   _g1->register_young_region_with_cset(hr);
1749   assert(hr->next_in_collection_set() == NULL, "invariant");
1750 }
1751 
1752 // Add the region at the RHS of the incremental cset
1753 void G1CollectorPolicy::add_region_to_incremental_cset_rhs(HeapRegion* hr) {
1754   // We should only ever be appending survivors at the end of a pause
1755   assert(hr->is_survivor(), "Logic");
1756 
1757   // Do the 'common' stuff
1758   add_region_to_incremental_cset_common(hr);
1759 
1760   // Now add the region at the right hand side
1761   if (_inc_cset_tail == NULL) {
1762     assert(_inc_cset_head == NULL, "invariant");
1763     _inc_cset_head = hr;
1764   } else {
1765     _inc_cset_tail->set_next_in_collection_set(hr);
1766   }
1767   _inc_cset_tail = hr;
1768 }
1769 


1894   YoungList* young_list = _g1->young_list();
1895   finalize_incremental_cset_building();
1896 
1897   guarantee(target_pause_time_ms > 0.0,
1898             err_msg("target_pause_time_ms = %1.6lf should be positive",
1899                     target_pause_time_ms));
1900   guarantee(_collection_set == NULL, "Precondition");
1901 
1902   double base_time_ms = predict_base_elapsed_time_ms(_pending_cards);
1903   double predicted_pause_time_ms = base_time_ms;
1904   double time_remaining_ms = MAX2(target_pause_time_ms - base_time_ms, 0.0);
1905 
1906   ergo_verbose4(ErgoCSetConstruction | ErgoHigh,
1907                 "start choosing CSet",
1908                 ergo_format_size("_pending_cards")
1909                 ergo_format_ms("predicted base time")
1910                 ergo_format_ms("remaining time")
1911                 ergo_format_ms("target pause time"),
1912                 _pending_cards, base_time_ms, time_remaining_ms, target_pause_time_ms);
1913 
1914   _last_gc_was_young = gcs_are_young();
1915 
1916   if (_last_gc_was_young) {
1917     _trace_young_gen_time_data.increment_young_collection_count();
1918   } else {
1919     _trace_young_gen_time_data.increment_mixed_collection_count();
1920   }
1921 
1922   // The young list is laid out so that the survivor regions from the
1923   // previous pause are appended to the RHS of the young list, i.e.
1924   //   [Newly Young Regions ++ Survivors from last pause].
1925 
1926   uint survivor_region_length = young_list->survivor_length();
1927   uint eden_region_length = young_list->eden_length();
1928   init_cset_region_lengths(eden_region_length, survivor_region_length);
1929 
1930   HeapRegion* hr = young_list->first_survivor_region();
1931   while (hr != NULL) {
1932     assert(hr->is_survivor(), "badly formed young list");
1933     // There is a convention that all the young regions in the CSet
1934     // are tagged as "eden", so we do this for the survivors here. We
1935     // use the special set_eden_pre_gc() as it doesn't check that the
1936     // region is free (which is not the case here).


1947   predicted_pause_time_ms += _inc_cset_predicted_elapsed_time_ms;
1948 
1949   ergo_verbose3(ErgoCSetConstruction | ErgoHigh,
1950                 "add young regions to CSet",
1951                 ergo_format_region("eden")
1952                 ergo_format_region("survivors")
1953                 ergo_format_ms("predicted young region time"),
1954                 eden_region_length, survivor_region_length,
1955                 _inc_cset_predicted_elapsed_time_ms);
1956 
1957   // The number of recorded young regions is the incremental
1958   // collection set's current size
1959   set_recorded_rs_lengths(_inc_cset_recorded_rs_lengths);
1960 
1961   double young_end_time_sec = os::elapsedTime();
1962   phase_times()->record_young_cset_choice_time_ms((young_end_time_sec - young_start_time_sec) * 1000.0);
1963 
1964   // Set the start of the non-young choice time.
1965   double non_young_start_time_sec = young_end_time_sec;
1966 
1967   if (!gcs_are_young()) {
1968     CollectionSetChooser* cset_chooser = _collectionSetChooser;
1969     cset_chooser->verify();
1970     const uint min_old_cset_length = calc_min_old_cset_length();
1971     const uint max_old_cset_length = calc_max_old_cset_length();
1972 
1973     uint expensive_region_num = 0;
1974     bool check_time_remaining = adaptive_young_list_length();
1975 
1976     HeapRegion* hr = cset_chooser->peek();
1977     while (hr != NULL) {
1978       if (old_cset_region_length() >= max_old_cset_length) {
1979         // Added maximum number of old regions to the CSet.
1980         ergo_verbose2(ErgoCSetConstruction,
1981                       "finish adding old regions to CSet",
1982                       ergo_format_reason("old CSet region num reached max")
1983                       ergo_format_region("old")
1984                       ergo_format_region("max"),
1985                       old_cset_region_length(), max_old_cset_length);
1986         break;
1987       }


1993       double reclaimable_perc = reclaimable_bytes_perc(reclaimable_bytes);
1994       double threshold = (double) G1HeapWastePercent;
1995       if (reclaimable_perc <= threshold) {
1996         // We've added enough old regions that the amount of uncollected
1997         // reclaimable space is at or below the waste threshold. Stop
1998         // adding old regions to the CSet.
1999         ergo_verbose5(ErgoCSetConstruction,
2000                       "finish adding old regions to CSet",
2001                       ergo_format_reason("reclaimable percentage not over threshold")
2002                       ergo_format_region("old")
2003                       ergo_format_region("max")
2004                       ergo_format_byte_perc("reclaimable")
2005                       ergo_format_perc("threshold"),
2006                       old_cset_region_length(),
2007                       max_old_cset_length,
2008                       reclaimable_bytes,
2009                       reclaimable_perc, threshold);
2010         break;
2011       }
2012 
2013       double predicted_time_ms = predict_region_elapsed_time_ms(hr, gcs_are_young());
2014       if (check_time_remaining) {
2015         if (predicted_time_ms > time_remaining_ms) {
2016           // Too expensive for the current CSet.
2017 
2018           if (old_cset_region_length() >= min_old_cset_length) {
2019             // We have added the minimum number of old regions to the CSet,
2020             // we are done with this CSet.
2021             ergo_verbose4(ErgoCSetConstruction,
2022                           "finish adding old regions to CSet",
2023                           ergo_format_reason("predicted time is too high")
2024                           ergo_format_ms("predicted time")
2025                           ergo_format_ms("remaining time")
2026                           ergo_format_region("old")
2027                           ergo_format_region("min"),
2028                           predicted_time_ms, time_remaining_ms,
2029                           old_cset_region_length(), min_old_cset_length);
2030             break;
2031           }
2032 
2033           // We'll add it anyway given that we haven't reached the


src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp (patched version)

  90   _alloc_rate_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  91   _prev_collection_pause_end_ms(0.0),
  92   _rs_length_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
  93   _cost_per_card_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  94   _young_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
  95   _mixed_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
  96   _cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  97   _mixed_cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  98   _cost_per_byte_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  99   _cost_per_byte_ms_during_cm_seq(new TruncatedSeq(TruncatedSeqLength)),
 100   _constant_other_time_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
 101   _young_other_cost_per_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
 102   _non_young_other_cost_per_region_ms_seq(
 103                                          new TruncatedSeq(TruncatedSeqLength)),
 104 
 105   _pending_cards_seq(new TruncatedSeq(TruncatedSeqLength)),
 106   _rs_lengths_seq(new TruncatedSeq(TruncatedSeqLength)),
 107 
 108   _pause_time_target_ms((double) MaxGCPauseMillis),
 109 






 110   _recent_prev_end_times_for_all_gcs_sec(
 111                                 new TruncatedSeq(NumPrevPausesForHeuristics)),
 112 
 113   _recent_avg_pause_time_ratio(0.0),
 114 





 115   _eden_used_bytes_before_gc(0),
 116   _survivor_used_bytes_before_gc(0),
 117   _heap_used_bytes_before_gc(0),
 118   _metaspace_used_bytes_before_gc(0),
 119   _eden_capacity_bytes_before_gc(0),
 120   _heap_capacity_bytes_before_gc(0),
 121 
 122   _eden_cset_region_length(0),
 123   _survivor_cset_region_length(0),
 124   _old_cset_region_length(0),
 125 
 126   _sigma(G1ConfidencePercent / 100.0),
 127 
 128   _collection_set(NULL),
 129   _collection_set_bytes_used_before(0),
 130 
 131   // Incremental CSet attributes
 132   _inc_cset_build_state(Inactive),
 133   _inc_cset_head(NULL),
 134   _inc_cset_tail(NULL),


 306 void G1CollectorPolicy::initialize_flags() {
 307   if (G1HeapRegionSize != HeapRegion::GrainBytes) {
 308     FLAG_SET_ERGO(size_t, G1HeapRegionSize, HeapRegion::GrainBytes);
 309   }
 310 
 311   if (SurvivorRatio < 1) {
 312     vm_exit_during_initialization("Invalid survivor ratio specified");
 313   }
 314   CollectorPolicy::initialize_flags();
 315   _young_gen_sizer = new G1YoungGenSizer(); // Must be after call to initialize_flags
 316 }
 317 
 318 void G1CollectorPolicy::post_heap_initialize() {
 319   uintx max_regions = G1CollectedHeap::heap()->max_regions();
 320   size_t max_young_size = (size_t)_young_gen_sizer->max_young_length(max_regions) * HeapRegion::GrainBytes;
 321   if (max_young_size != MaxNewSize) {
 322     FLAG_SET_ERGO(size_t, MaxNewSize, max_young_size);
 323   }
 324 }
 325 
 326 G1CollectorState* G1CollectorPolicy::collector_state() { return _g1->collector_state(); }
 327 
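
On this side of the diff, the former policy booleans (_gcs_are_young,
_last_young_gc, _in_marking_window, and friends) are reached through the
collector_state() accessor above. A condensed sketch of the shape that state
object takes, inferred from the getter/setter calls visible below (only a
subset of fields; not the full G1CollectorState class):

    // Condensed sketch: flags move into one state object with
    // getter/setter pairs, matching the calls in the patched code.
    class G1CollectorStateSketch {
      bool _gcs_are_young;
      bool _last_young_gc;
    public:
      G1CollectorStateSketch() : _gcs_are_young(true), _last_young_gc(false) {}
      bool gcs_are_young() const     { return _gcs_are_young; }
      void set_gcs_are_young(bool v) { _gcs_are_young = v; }
      bool last_young_gc() const     { return _last_young_gc; }
      void set_last_young_gc(bool v) { _last_young_gc = v; }
    };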
 328 G1YoungGenSizer::G1YoungGenSizer() : _sizer_kind(SizerDefaults), _adaptive_size(true),
 329         _min_desired_young_length(0), _max_desired_young_length(0) {
 330   if (FLAG_IS_CMDLINE(NewRatio)) {
 331     if (FLAG_IS_CMDLINE(NewSize) || FLAG_IS_CMDLINE(MaxNewSize)) {
 332       warning("-XX:NewSize and -XX:MaxNewSize override -XX:NewRatio");
 333     } else {
 334       _sizer_kind = SizerNewRatio;
 335       _adaptive_size = false;
 336       return;
 337     }
 338   }
 339 
 340   if (NewSize > MaxNewSize) {
 341     if (FLAG_IS_CMDLINE(MaxNewSize)) {
 342       warning("NewSize (" SIZE_FORMAT "k) is greater than the MaxNewSize (" SIZE_FORMAT "k). "
 343               "A new max generation size of " SIZE_FORMAT "k will be used.",
 344               NewSize/K, MaxNewSize/K, NewSize/K);
 345     }
 346     MaxNewSize = NewSize;
 347   }


 526   uint desired_min_length = calculate_young_list_desired_min_length(base_min_length);
 527   // This is the absolute minimum young length. Ensure that we
 528   // will at least have one eden region available for allocation.
 529   uint absolute_min_length = base_min_length + MAX2(_g1->young_list()->eden_length(), (uint)1);
 530   // If we shrank the young list target it should not shrink below the current size.
 531   desired_min_length = MAX2(desired_min_length, absolute_min_length);
 532   // Calculate the absolute and desired max bounds.
 533 
 534   // We will try our best not to "eat" into the reserve.
 535   uint absolute_max_length = 0;
 536   if (_free_regions_at_end_of_collection > _reserve_regions) {
 537     absolute_max_length = _free_regions_at_end_of_collection - _reserve_regions;
 538   }
 539   uint desired_max_length = calculate_young_list_desired_max_length();
 540   if (desired_max_length > absolute_max_length) {
 541     desired_max_length = absolute_max_length;
 542   }
 543 
 544   uint young_list_target_length = 0;
 545   if (adaptive_young_list_length()) {
 546     if (collector_state()->gcs_are_young()) {
 547       young_list_target_length =
 548                         calculate_young_list_target_length(rs_lengths,
 549                                                            base_min_length,
 550                                                            desired_min_length,
 551                                                            desired_max_length);
 552       _rs_lengths_prediction = rs_lengths;
 553     } else {
 554       // Don't calculate anything and let the code below bound it to
 555       // the desired_min_length, i.e., do the next GC as soon as
 556       // possible to maximize how many old regions we can add to it.
 557     }
 558   } else {
 559     // The user asked for a fixed young gen so we'll fix the young gen
 560     // whether the next GC is young or mixed.
 561     young_list_target_length = _young_list_fixed_length;
 562   }
 563 
 564   // Make sure we don't go over the desired max length, nor under the
 565   // desired min length. In case they clash, desired_min_length wins
 566   // which is why that test is second.


 568     young_list_target_length = desired_max_length;
 569   }
 570   if (young_list_target_length < desired_min_length) {
 571     young_list_target_length = desired_min_length;
 572   }
 573 
 574   assert(young_list_target_length > recorded_survivor_regions(),
 575          "we should be able to allocate at least one eden region");
 576   assert(young_list_target_length >= absolute_min_length, "post-condition");
 577   _young_list_target_length = young_list_target_length;
 578 
 579   update_max_gc_locker_expansion();
 580 }
 581 
 582 uint
 583 G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths,
 584                                                      uint base_min_length,
 585                                                      uint desired_min_length,
 586                                                      uint desired_max_length) {
 587   assert(adaptive_young_list_length(), "pre-condition");
 588   assert(collector_state()->gcs_are_young(), "only call this for young GCs");
 589 
 590   // In case some edge-condition makes the desired max length too small...
 591   if (desired_max_length <= desired_min_length) {
 592     return desired_min_length;
 593   }
 594 
 595   // We'll adjust min_young_length and max_young_length not to include
 596   // the already allocated young regions (i.e., so they reflect the
 597   // min and max eden regions we'll allocate). The base_min_length
 598   // will be reflected in the predictions by the
 599   // survivor_regions_evac_time prediction.
 600   assert(desired_min_length > base_min_length, "invariant");
 601   uint min_young_length = desired_min_length - base_min_length;
 602   assert(desired_max_length > base_min_length, "invariant");
 603   uint max_young_length = desired_max_length - base_min_length;
 604 
 605   double target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
 606   double survivor_regions_evac_time = predict_survivor_regions_evac_time();
 607   size_t pending_cards = (size_t) get_new_prediction(_pending_cards_seq);
 608   size_t adj_rs_lengths = rs_lengths + predict_rs_length_diff();


 671                               base_free_regions, target_pause_time_ms),
 672              "min_young_length, the result of the binary search, should "
 673              "fit into the pause target");
 674       assert(!predict_will_fit(min_young_length + 1, base_time_ms,
 675                                base_free_regions, target_pause_time_ms),
 676              "min_young_length, the result of the binary search, should be "
 677              "optimal, so no larger length should fit into the pause target");
 678     }
 679   } else {
 680     // Even the minimum length doesn't fit into the pause time
 681     // target, return it as the result nevertheless.
 682   }
 683   return base_min_length + min_young_length;
 684 }
 685 
 686 double G1CollectorPolicy::predict_survivor_regions_evac_time() {
 687   double survivor_regions_evac_time = 0.0;
 688   for (HeapRegion * r = _recorded_survivor_head;
 689        r != NULL && r != _recorded_survivor_tail->get_next_young_region();
 690        r = r->get_next_young_region()) {
 691     survivor_regions_evac_time += predict_region_elapsed_time_ms(r, collector_state()->gcs_are_young());
 692   }
 693   return survivor_regions_evac_time;
 694 }
 695 
 696 void G1CollectorPolicy::revise_young_list_target_length_if_necessary() {
 697   guarantee( adaptive_young_list_length(), "should not call this otherwise" );
 698 
 699   size_t rs_lengths = _g1->young_list()->sampled_rs_lengths();
 700   if (rs_lengths > _rs_lengths_prediction) {
 701     // add 10% to avoid having to recalculate often
 702     size_t rs_lengths_prediction = rs_lengths * 1100 / 1000;
 703     update_young_list_target_length(rs_lengths_prediction);
 704   }
 705 }
 706 
 707 
 708 
 709 HeapWord* G1CollectorPolicy::mem_allocate_work(size_t size,
 710                                                bool is_tlab,
 711                                                bool* gc_overhead_limit_was_exceeded) {


 756         ret = false;
 757       }
 758 
 759       if (age <= prev_age) {
 760         gclog_or_tty->print_cr("## %s: region ages are not strictly increasing "
 761                                "(%d, %d)", name, age, prev_age);
 762         ret = false;
 763       }
 764       prev_age = age;
 765     }
 766   }
 767 
 768   return ret;
 769 }
 770 #endif // PRODUCT
 771 
 772 void G1CollectorPolicy::record_full_collection_start() {
 773   _full_collection_start_sec = os::elapsedTime();
 774   record_heap_size_info_at_start(true /* full */);
 775   // Release the future to-space so that it is available for compaction into.
 776   collector_state()->set_full_collection(true);
 777 }
 778 
 779 void G1CollectorPolicy::record_full_collection_end() {
 780   // Consider this like a collection pause for the purposes of allocation
 781   // since last pause.
 782   double end_sec = os::elapsedTime();
 783   double full_gc_time_sec = end_sec - _full_collection_start_sec;
 784   double full_gc_time_ms = full_gc_time_sec * 1000.0;
 785 
 786   _trace_old_gen_time_data.record_full_collection(full_gc_time_ms);
 787 
 788   update_recent_gc_times(end_sec, full_gc_time_ms);
 789 
 790   collector_state()->set_full_collection(false);
 791 
 792   // "Nuke" the heuristics that control the young/mixed GC
 793   // transitions and make sure we start with young GCs after the Full GC.
 794   collector_state()->set_gcs_are_young(true);
 795   collector_state()->set_last_young_gc(false);
 796   collector_state()->set_initiate_conc_mark_if_possible(false);
 797   collector_state()->set_during_initial_mark_pause(false);
 798   collector_state()->set_in_marking_window(false);
 799   collector_state()->set_in_marking_window_im(false);
 800 
 801   _short_lived_surv_rate_group->start_adding_regions();
 802   // also call this on any additional surv rate groups
 803 
 804   record_survivor_regions(0, NULL, NULL);
 805 
 806   _free_regions_at_end_of_collection = _g1->num_free_regions();
 807   // Reset survivors SurvRateGroup.
 808   _survivor_surv_rate_group->reset();
 809   update_young_list_target_length();
 810   _collectionSetChooser->clear();
 811 }
 812 
 813 void G1CollectorPolicy::record_stop_world_start() {
 814   _stop_world_start = os::elapsedTime();
 815 }
 816 
 817 void G1CollectorPolicy::record_collection_pause_start(double start_time_sec) {
 818   // We only need to do this here as the policy will only be applied
 819   // to the GC we're about to start. so, no point is calculating this
 820   // every time we calculate / recalculate the target young length.
 821   update_survivors_policy();
 822 
 823   assert(_g1->used() == _g1->recalculate_used(),
 824          err_msg("sanity, used: "SIZE_FORMAT" recalculate_used: "SIZE_FORMAT,
 825                  _g1->used(), _g1->recalculate_used()));
 826 
 827   double s_w_t_ms = (start_time_sec - _stop_world_start) * 1000.0;
 828   _trace_young_gen_time_data.record_start_collection(s_w_t_ms);
 829   _stop_world_start = 0.0;
 830 
 831   record_heap_size_info_at_start(false /* full */);
 832 
 833   phase_times()->record_cur_collection_start_sec(start_time_sec);
 834   _pending_cards = _g1->pending_card_num();
 835 
 836   _collection_set_bytes_used_before = 0;
 837   _bytes_copied_during_gc = 0;
 838 
 839   collector_state()->set_last_gc_was_young(false);
 840 
 841   // do that for any other surv rate groups
 842   _short_lived_surv_rate_group->stop_adding_regions();
 843   _survivors_age_table.clear();
 844 
 845   assert(verify_young_ages(), "region age verification");
 846 }
 847 
 848 void G1CollectorPolicy::record_concurrent_mark_init_end(double
 849                                                    mark_init_elapsed_time_ms) {
 850   collector_state()->set_during_marking(true);
 851   assert(!collector_state()->initiate_conc_mark_if_possible(), "we should have cleared it by now");
 852   collector_state()->set_during_initial_mark_pause(false);
 853   _cur_mark_stop_world_time_ms = mark_init_elapsed_time_ms;
 854 }
 855 
 856 void G1CollectorPolicy::record_concurrent_mark_remark_start() {
 857   _mark_remark_start_sec = os::elapsedTime();
 858   collector_state()->set_during_marking(false);
 859 }
 860 
 861 void G1CollectorPolicy::record_concurrent_mark_remark_end() {
 862   double end_time_sec = os::elapsedTime();
 863   double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec)*1000.0;
 864   _concurrent_mark_remark_times_ms->add(elapsed_time_ms);
 865   _cur_mark_stop_world_time_ms += elapsed_time_ms;
 866   _prev_collection_pause_end_ms += elapsed_time_ms;
 867 
 868   _mmu_tracker->add_pause(_mark_remark_start_sec, end_time_sec, true);
 869 }
 870 
 871 void G1CollectorPolicy::record_concurrent_mark_cleanup_start() {
 872   _mark_cleanup_start_sec = os::elapsedTime();
 873 }
 874 
 875 void G1CollectorPolicy::record_concurrent_mark_cleanup_completed() {
 876   collector_state()->set_last_young_gc(true);
 877   collector_state()->set_in_marking_window(false);
 878 }
 879 
 880 void G1CollectorPolicy::record_concurrent_pause() {
 881   if (_stop_world_start > 0.0) {
 882     double yield_ms = (os::elapsedTime() - _stop_world_start) * 1000.0;
 883     _trace_young_gen_time_data.record_yield_time(yield_ms);
 884   }
 885 }
 886 
 887 bool G1CollectorPolicy::need_to_start_conc_mark(const char* source, size_t alloc_word_size) {
 888   if (_g1->concurrent_mark()->cmThread()->during_cycle()) {
 889     return false;
 890   }
 891 
 892   size_t marking_initiating_used_threshold =
 893     (_g1->capacity() / 100) * InitiatingHeapOccupancyPercent;
 894   size_t cur_used_bytes = _g1->non_young_capacity_bytes();
 895   size_t alloc_byte_size = alloc_word_size * HeapWordSize;
 896 
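       // Worked example (illustrative numbers): on a 1 GB heap with the
       // default InitiatingHeapOccupancyPercent of 45, the threshold is
       // (1073741824 / 100) * 45, i.e. roughly 460 MB; a concurrent cycle
       // is requested once non-young occupancy plus the pending allocation
       // crosses that mark.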
 897   if ((cur_used_bytes + alloc_byte_size) > marking_initiating_used_threshold) {
 898     if (collector_state()->gcs_are_young() && !collector_state()->last_young_gc()) {
 899       ergo_verbose5(ErgoConcCycles,
 900         "request concurrent cycle initiation",
 901         ergo_format_reason("occupancy higher than threshold")
 902         ergo_format_byte("occupancy")
 903         ergo_format_byte("allocation request")
 904         ergo_format_byte_perc("threshold")
 905         ergo_format_str("source"),
 906         cur_used_bytes,
 907         alloc_byte_size,
 908         marking_initiating_used_threshold,
 909         (double) InitiatingHeapOccupancyPercent,
 910         source);
 911       return true;
 912     } else {
 913       ergo_verbose5(ErgoConcCycles,
 914         "do not request concurrent cycle initiation",
 915         ergo_format_reason("still doing mixed collections")
 916         ergo_format_byte("occupancy")
 917         ergo_format_byte("allocation request")
 918         ergo_format_byte_perc("threshold")


 933 
 934 void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, EvacuationInfo& evacuation_info) {
 935   double end_time_sec = os::elapsedTime();
 936   assert(_cur_collection_pause_used_regions_at_start >= cset_region_length(),
 937          "otherwise, the subtraction below does not make sense");
 938   size_t rs_size =
 939             _cur_collection_pause_used_regions_at_start - cset_region_length();
 940   size_t cur_used_bytes = _g1->used();
 941   assert(cur_used_bytes == _g1->recalculate_used(), "It should!");
 942   bool last_pause_included_initial_mark = false;
 943   bool update_stats = !_g1->evacuation_failed();
 944 
 945 #ifndef PRODUCT
 946   if (G1YoungSurvRateVerbose) {
 947     gclog_or_tty->cr();
 948     _short_lived_surv_rate_group->print();
 949     // do that for any other surv rate groups too
 950   }
 951 #endif // PRODUCT
 952 
 953   last_pause_included_initial_mark = collector_state()->during_initial_mark_pause();
 954   if (last_pause_included_initial_mark) {
 955     record_concurrent_mark_init_end(0.0);
 956   } else if (need_to_start_conc_mark("end of GC")) {
 957     // Note: this might have already been set, if during the last
 958     // pause we decided to start a cycle but at the beginning of
 959     // this pause we decided to postpone it. That's OK.
 960     collector_state()->set_initiate_conc_mark_if_possible(true);
 961   }
 962 
 963   _mmu_tracker->add_pause(end_time_sec - pause_time_ms/1000.0,
 964                           end_time_sec, false);
 965 
 966   evacuation_info.set_collectionset_used_before(_collection_set_bytes_used_before);
 967   evacuation_info.set_bytes_copied(_bytes_copied_during_gc);
 968 
 969   if (update_stats) {
 970     _trace_young_gen_time_data.record_end_collection(pause_time_ms, phase_times());
 971     // this is where we update the allocation rate of the application
 972     double app_time_ms =
 973       (phase_times()->cur_collection_start_sec() * 1000.0 - _prev_collection_pause_end_ms);
 974     if (app_time_ms < MIN_TIMER_GRANULARITY) {
 975       // This usually happens due to the timer not having the required
 976       // granularity. Some Linuxes are the usual culprits.
 977       // We'll just set it to something (arbitrarily) small.
 978       app_time_ms = 1.0;
 979     }
 980     // We maintain the invariant that all objects allocated by mutator


1002       gclog_or_tty->print_cr("Recent GC Times (ms):");
1003       _recent_gc_times_ms->dump();
1004       gclog_or_tty->print_cr("(End Time=%3.3f) Recent GC End Times (s):", end_time_sec);
1005       _recent_prev_end_times_for_all_gcs_sec->dump();
1006       gclog_or_tty->print_cr("GC = %3.3f, Interval = %3.3f, Ratio = %3.3f",
1007                              _recent_gc_times_ms->sum(), interval_ms, recent_avg_pause_time_ratio());
1008       // In debug mode, terminate the JVM if the user wants to debug at this point.
1009       assert(!G1FailOnFPError, "Debugging data for CR 6898948 has been dumped above");
1010 #endif  // !PRODUCT
1011       // Clip ratio between 0.0 and 1.0, and continue. This will be fixed in
1012       // CR 6902692 by redoing the manner in which the ratio is incrementally computed.
1013       if (_recent_avg_pause_time_ratio < 0.0) {
1014         _recent_avg_pause_time_ratio = 0.0;
1015       } else {
1016         assert(_recent_avg_pause_time_ratio - 1.0 > 0.0, "Ctl-point invariant");
1017         _recent_avg_pause_time_ratio = 1.0;
1018       }
1019     }
1020   }
1021 
1022   bool new_in_marking_window = collector_state()->in_marking_window();
1023   bool new_in_marking_window_im = false;
1024   if (last_pause_included_initial_mark) {
1025     new_in_marking_window = true;
1026     new_in_marking_window_im = true;
1027   }
1028 
1029   if (collector_state()->last_young_gc()) {
1030     // This is supposed to be the "last young GC" before we start
1031     // doing mixed GCs. Here we decide whether to start mixed GCs or not.
1032 
1033     if (!last_pause_included_initial_mark) {
1034       if (next_gc_should_be_mixed("start mixed GCs",
1035                                   "do not start mixed GCs")) {
1036         collector_state()->set_gcs_are_young(false);
1037       }
1038     } else {
1039       ergo_verbose0(ErgoMixedGCs,
1040                     "do not start mixed GCs",
1041                     ergo_format_reason("concurrent cycle is about to start"));
1042     }
1043     collector_state()->set_last_young_gc(false);
1044   }
1045 
1046   if (!collector_state()->last_gc_was_young()) {
1047     // This is a mixed GC. Here we decide whether to continue doing
1048     // mixed GCs or not.
1049 
1050     if (!next_gc_should_be_mixed("continue mixed GCs",
1051                                  "do not continue mixed GCs")) {
1052       collector_state()->set_gcs_are_young(true);
1053     }
1054   }
1055 
1056   _short_lived_surv_rate_group->start_adding_regions();
1057   // Do that for any other surv rate groups
1058 
1059   if (update_stats) {
1060     double cost_per_card_ms = 0.0;
1061     if (_pending_cards > 0) {
1062       cost_per_card_ms = phase_times()->average_time_ms(G1GCPhaseTimes::UpdateRS) / (double) _pending_cards;
1063       _cost_per_card_ms_seq->add(cost_per_card_ms);
1064     }
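         // Illustrative numbers: if UpdateRS averaged 10 ms across workers
         // and 20000 cards were pending, 0.0005 ms/card is recorded and
         // later consumed by predict_rs_update_time_ms().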
1065 
1066     size_t cards_scanned = _g1->cards_scanned();
1067 
1068     double cost_per_entry_ms = 0.0;
1069     if (cards_scanned > 10) {
1070       cost_per_entry_ms = phase_times()->average_time_ms(G1GCPhaseTimes::ScanRS) / (double) cards_scanned;
1071       if (collector_state()->last_gc_was_young()) {
1072         _cost_per_entry_ms_seq->add(cost_per_entry_ms);
1073       } else {
1074         _mixed_cost_per_entry_ms_seq->add(cost_per_entry_ms);
1075       }
1076     }
1077 
1078     if (_max_rs_lengths > 0) {
1079       double cards_per_entry_ratio =
1080         (double) cards_scanned / (double) _max_rs_lengths;
1081       if (collector_state()->last_gc_was_young()) {
1082         _young_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
1083       } else {
1084         _mixed_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
1085       }
1086     }
1087 
1088     // This is defensive. For a while _max_rs_lengths could get
1089     // smaller than _recorded_rs_lengths which was causing
1090     // rs_length_diff to get very large and mess up the RSet length
1091     // predictions. The reason was unsafe concurrent updates to the
1092     // _inc_cset_recorded_rs_lengths field which the code below guards
1093     // against (see CR 7118202). This bug has now been fixed (see CR
1094     // 7119027). However, I'm still worried that
1095     // _inc_cset_recorded_rs_lengths might still end up somewhat
1096     // inaccurate. The concurrent refinement thread calculates an
1097     // RSet's length concurrently with other CR threads updating it
1098     // which might cause it to calculate the length incorrectly (if,
1099     // say, it's in mid-coarsening). So I'll leave in the defensive
1100     // conditional below just in case.
1101     size_t rs_length_diff = 0;
1102     if (_max_rs_lengths > _recorded_rs_lengths) {
1103       rs_length_diff = _max_rs_lengths - _recorded_rs_lengths;
1104     }
1105     _rs_length_diff_seq->add((double) rs_length_diff);
1106 
1107     size_t freed_bytes = _heap_used_bytes_before_gc - cur_used_bytes;
1108     size_t copied_bytes = _collection_set_bytes_used_before - freed_bytes;
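         // Illustrative numbers: if the collection set occupied 100 MB
         // before the pause and overall heap usage dropped by 80 MB, then
         // 20 MB survived the evacuation and had to be copied.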
1109     double cost_per_byte_ms = 0.0;
1110 
1111     if (copied_bytes > 0) {
1112       cost_per_byte_ms = phase_times()->average_time_ms(G1GCPhaseTimes::ObjCopy) / (double) copied_bytes;
1113       if (collector_state()->in_marking_window()) {
1114         _cost_per_byte_ms_during_cm_seq->add(cost_per_byte_ms);
1115       } else {
1116         _cost_per_byte_ms_seq->add(cost_per_byte_ms);
1117       }
1118     }
1119 
1120     double all_other_time_ms = pause_time_ms -
1121       (phase_times()->average_time_ms(G1GCPhaseTimes::UpdateRS) + phase_times()->average_time_ms(G1GCPhaseTimes::ScanRS) +
1122           phase_times()->average_time_ms(G1GCPhaseTimes::ObjCopy) + phase_times()->average_time_ms(G1GCPhaseTimes::Termination));
1123 
1124     double young_other_time_ms = 0.0;
1125     if (young_cset_region_length() > 0) {
1126       young_other_time_ms =
1127         phase_times()->young_cset_choice_time_ms() +
1128         phase_times()->young_free_cset_time_ms();
1129       _young_other_cost_per_region_ms_seq->add(young_other_time_ms /
1130                                           (double) young_cset_region_length());
1131     }
1132     double non_young_other_time_ms = 0.0;
1133     if (old_cset_region_length() > 0) {


1136         phase_times()->non_young_free_cset_time_ms();
1137 
1138       _non_young_other_cost_per_region_ms_seq->add(non_young_other_time_ms /
1139                                             (double) old_cset_region_length());
1140     }
1141 
1142     double constant_other_time_ms = all_other_time_ms -
1143       (young_other_time_ms + non_young_other_time_ms);
1144     _constant_other_time_ms_seq->add(constant_other_time_ms);
1145 
1146     double survival_ratio = 0.0;
1147     if (_collection_set_bytes_used_before > 0) {
1148       survival_ratio = (double) _bytes_copied_during_gc /
1149                                    (double) _collection_set_bytes_used_before;
1150     }
1151 
1152     _pending_cards_seq->add((double) _pending_cards);
1153     _rs_lengths_seq->add((double) _max_rs_lengths);
1154   }
1155 
1156   collector_state()->set_in_marking_window(new_in_marking_window);
1157   collector_state()->set_in_marking_window_im(new_in_marking_window_im);
1158   _free_regions_at_end_of_collection = _g1->num_free_regions();
1159   update_young_list_target_length();
1160 
1161   // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
1162   double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
1163   adjust_concurrent_refinement(phase_times()->average_time_ms(G1GCPhaseTimes::UpdateRS),
1164                                phase_times()->sum_thread_work_items(G1GCPhaseTimes::UpdateRS), update_rs_time_goal_ms);
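       // For example, if the MMU tracker allows 0.2 s of GC time and
       // G1RSetUpdatingPauseTimePercent has its default value of 10, the
       // refinement threads are retuned so that UpdateRS work stays within
       // roughly 20 ms per pause.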
1165 
1166   _collectionSetChooser->verify();
1167 }
1168 
1169 #define EXT_SIZE_FORMAT "%.1f%s"
1170 #define EXT_SIZE_PARAMS(bytes)                                  \
1171   byte_size_in_proper_unit((double)(bytes)),                    \
1172   proper_unit_for_byte_size((bytes))
1173 
1174 void G1CollectorPolicy::record_heap_size_info_at_start(bool full) {
1175   YoungList* young_list = _g1->young_list();
1176   _eden_used_bytes_before_gc = young_list->eden_used_bytes();
1177   _survivor_used_bytes_before_gc = young_list->survivor_used_bytes();


1275     dcqs.set_completed_queue_padding(curr_queue_size);
1276   } else {
1277     dcqs.set_completed_queue_padding(0);
1278   }
1279   dcqs.notify_if_necessary();
1280 }
1281 
1282 double
1283 G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards,
1284                                                 size_t scanned_cards) {
1285   return
1286     predict_rs_update_time_ms(pending_cards) +
1287     predict_rs_scan_time_ms(scanned_cards) +
1288     predict_constant_other_time_ms();
1289 }
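
     // Illustrative composition (made-up magnitudes): 4000 pending cards
     // at 0.001 ms/card plus 20000 scanned cards at 0.0005 ms/card plus
     // 2 ms of constant overhead predicts a base time of 4 + 10 + 2 = 16 ms.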
1290 
1291 double
1292 G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards) {
1293   size_t rs_length = predict_rs_length_diff();
1294   size_t card_num;
1295   if (collector_state()->gcs_are_young()) {
1296     card_num = predict_young_card_num(rs_length);
1297   } else {
1298     card_num = predict_non_young_card_num(rs_length);
1299   }
1300   return predict_base_elapsed_time_ms(pending_cards, card_num);
1301 }
1302 
1303 size_t G1CollectorPolicy::predict_bytes_to_copy(HeapRegion* hr) {
1304   size_t bytes_to_copy;
1305   if (hr->is_marked()) {
1306     bytes_to_copy = hr->max_live_bytes();
1307   } else {
1308     assert(hr->is_young() && hr->age_in_surv_rate_group() != -1, "invariant");
1309     int age = hr->age_in_surv_rate_group();
1310     double yg_surv_rate = predict_yg_surv_rate(age, hr->surv_rate_group());
1311     bytes_to_copy = (size_t) ((double) hr->used() * yg_surv_rate);
1312   }
1313   return bytes_to_copy;
1314 }
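
     // Worked example (illustrative numbers): an unmarked young region
     // with 1048576 used bytes and a predicted survival rate of 0.25 for
     // its age contributes 262144 bytes to the expected copying cost; a
     // marked region contributes its max_live_bytes() directly.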
1315 


1441 void G1CollectorPolicy::update_survivors_policy() {
1442   double max_survivor_regions_d =
1443                  (double) _young_list_target_length / (double) SurvivorRatio;
1444   // We use ceiling so that if max_survivor_regions_d is > 0.0 (but
1445   // smaller than 1.0) we'll get 1.
1446   _max_survivor_regions = (uint) ceil(max_survivor_regions_d);
1447 
1448   _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold(
1449         HeapRegion::GrainWords * _max_survivor_regions, counters());
1450 }
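
     // E.g., a young list target of 25 regions with the default
     // SurvivorRatio of 8 gives ceil(25 / 8.0) = 4 survivor regions, so
     // the tenuring threshold is computed against a cap of
     // 4 * HeapRegion::GrainWords words.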
1451 
1452 bool G1CollectorPolicy::force_initial_mark_if_outside_cycle(
1453                                                      GCCause::Cause gc_cause) {
1454   bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
1455   if (!during_cycle) {
1456     ergo_verbose1(ErgoConcCycles,
1457                   "request concurrent cycle initiation",
1458                   ergo_format_reason("requested by GC cause")
1459                   ergo_format_str("GC cause"),
1460                   GCCause::to_string(gc_cause));
1461     collector_state()->set_initiate_conc_mark_if_possible(true);
1462     return true;
1463   } else {
1464     ergo_verbose1(ErgoConcCycles,
1465                   "do not request concurrent cycle initiation",
1466                   ergo_format_reason("concurrent cycle already in progress")
1467                   ergo_format_str("GC cause"),
1468                   GCCause::to_string(gc_cause));
1469     return false;
1470   }
1471 }
1472 
1473 void
1474 G1CollectorPolicy::decide_on_conc_mark_initiation() {
1475   // We are about to decide on whether this pause will be an
1476   // initial-mark pause.
1477 
1478   // First, collector_state()->during_initial_mark_pause() should not already be set. We
1479   // will set it here if we have to. However, it should be cleared by
1480   // the end of the pause (it's only set for the duration of an
1481   // initial-mark pause).
1482   assert(!collector_state()->during_initial_mark_pause(), "pre-condition");
1483 
1484   if (collector_state()->initiate_conc_mark_if_possible()) {
1485     // We had noticed on a previous pause that the heap occupancy has
1486     // gone over the initiating threshold and we should start a
1487     // concurrent marking cycle. So we might initiate one.
1488 
1489     bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
1490     if (!during_cycle) {
1491       // The concurrent marking thread is not "during a cycle", i.e.,
1492       // it has completed the last one. So we can go ahead and
1493       // initiate a new cycle.
1494 
1495       collector_state()->set_during_initial_mark_pause(true);
1496       // We do not allow mixed GCs during marking.
1497       if (!collector_state()->gcs_are_young()) {
1498         collector_state()->set_gcs_are_young(true);
1499         ergo_verbose0(ErgoMixedGCs,
1500                       "end mixed GCs",
1501                       ergo_format_reason("concurrent cycle is about to start"));
1502       }
1503 
1504       // And we can now clear initiate_conc_mark_if_possible() as
1505       // we've already acted on it.
1506       collector_state()->set_initiate_conc_mark_if_possible(false);
1507 
1508       ergo_verbose0(ErgoConcCycles,
1509                   "initiate concurrent cycle",
1510                   ergo_format_reason("concurrent cycle initiation requested"));
1511     } else {
1512       // The concurrent marking thread is still finishing up the
1513     // previous cycle. If we start one right now the two cycles will
1514     // overlap. In particular, the concurrent marking thread might
1515       // be in the process of clearing the next marking bitmap (which
1516       // we will use for the next cycle if we start one). Starting a
1517       // cycle now will be bad given that parts of the marking
1518       // information might get cleared by the marking thread. And we
1519       // cannot wait for the marking thread to finish the cycle as it
1520       // periodically yields while clearing the next marking bitmap
1521       // and, if it's in a yield point, it's waiting for us to
1522       // finish. So, at this point we will not start a cycle and we'll
1523       // let the concurrent marking thread complete the last one.
1524       ergo_verbose0(ErgoConcCycles,
1525                     "do not initiate concurrent cycle",
1526                     ergo_format_reason("concurrent cycle already in progress"));


1657   _inc_cset_predicted_elapsed_time_ms +=
1658                                      _inc_cset_predicted_elapsed_time_ms_diffs;
1659 
1660   _inc_cset_recorded_rs_lengths_diffs = 0;
1661   _inc_cset_predicted_elapsed_time_ms_diffs = 0.0;
1662 }
1663 
1664 void G1CollectorPolicy::add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length) {
1665   // This routine is used when:
1666   // * adding survivor regions to the incremental cset at the end of an
1667   //   evacuation pause,
1668   // * adding the current allocation region to the incremental cset
1669   //   when it is retired, and
1670   // * updating existing policy information for a region in the
1671   //   incremental cset via young list RSet sampling.
1672   // Therefore this routine may be called at a safepoint by the
1673   // VM thread, or in-between safepoints by mutator threads (when
1674   // retiring the current allocation region) or a concurrent
1675   // refine thread (RSet sampling).
1676 
1677   double region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, collector_state()->gcs_are_young());
1678   size_t used_bytes = hr->used();
1679   _inc_cset_recorded_rs_lengths += rs_length;
1680   _inc_cset_predicted_elapsed_time_ms += region_elapsed_time_ms;
1681   _inc_cset_bytes_used_before += used_bytes;
1682 
1683   // Cache the values we have added to the aggregated information
1684   // in the heap region in case we have to remove this region from
1685   // the incremental collection set, or it is updated by the
1686   // rset sampling code
1687   hr->set_recorded_rs_length(rs_length);
1688   hr->set_predicted_elapsed_time_ms(region_elapsed_time_ms);
1689 }
1690 
1691 void G1CollectorPolicy::update_incremental_cset_info(HeapRegion* hr,
1692                                                      size_t new_rs_length) {
1693   // Update the CSet information that is dependent on the new RS length
1694   assert(hr->is_young(), "Precondition");
1695   assert(!SafepointSynchronize::is_at_safepoint(),
1696                                                "should not be at a safepoint");
1697 
1698   // We could have updated _inc_cset_recorded_rs_lengths and
1699   // _inc_cset_predicted_elapsed_time_ms directly but we'd need to do
1700   // that atomically, as this code is executed by a concurrent
1701   // refinement thread, potentially concurrently with a mutator thread
1702   // allocating a new region and also updating the same fields. To
1703   // avoid the atomic operations we accumulate these updates on two
1704   // separate fields (*_diffs) and we'll just add them to the "main"
1705   // fields at the start of a GC.
1706 
1707   ssize_t old_rs_length = (ssize_t) hr->recorded_rs_length();
1708   ssize_t rs_lengths_diff = (ssize_t) new_rs_length - old_rs_length;
1709   _inc_cset_recorded_rs_lengths_diffs += rs_lengths_diff;
1710 
1711   double old_elapsed_time_ms = hr->predicted_elapsed_time_ms();
1712   double new_region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, collector_state()->gcs_are_young());
1713   double elapsed_ms_diff = new_region_elapsed_time_ms - old_elapsed_time_ms;
1714   _inc_cset_predicted_elapsed_time_ms_diffs += elapsed_ms_diff;
1715 
1716   hr->set_recorded_rs_length(new_rs_length);
1717   hr->set_predicted_elapsed_time_ms(new_region_elapsed_time_ms);
1718 }
1719 
1720 void G1CollectorPolicy::add_region_to_incremental_cset_common(HeapRegion* hr) {
1721   assert(hr->is_young(), "invariant");
1722   assert(hr->young_index_in_cset() > -1, "should have already been set");
1723   assert(_inc_cset_build_state == Active, "Precondition");
1724 
1725   // We need to clear and set the cached recorded/cached collection set
1726   // information in the heap region here (before the region gets added
1727   // to the collection set). An individual heap region's cached values
1728   // are calculated, aggregated with the policy collection set info,
1729   // and cached in the heap region here (initially) and (subsequently)
1730   // by the Young List sampling code.
1731 
1732   size_t rs_length = hr->rem_set()->occupied();
1733   add_to_incremental_cset_info(hr, rs_length);
1734 
1735   HeapWord* hr_end = hr->end();
1736   _inc_cset_max_finger = MAX2(_inc_cset_max_finger, hr_end);
1737 
1738   assert(!hr->in_collection_set(), "invariant");
1739   _g1->register_young_region_with_cset(hr);
1740   assert(hr->next_in_collection_set() == NULL, "invariant");
1741 }
1742 
1743 // Add the region at the RHS of the incremental cset
1744 void G1CollectorPolicy::add_region_to_incremental_cset_rhs(HeapRegion* hr) {
1745   // We should only ever be appending survivors at the end of a pause
1746   assert(hr->is_survivor(), "Logic");
1747 
1748   // Do the 'common' stuff
1749   add_region_to_incremental_cset_common(hr);
1750 
1751   // Now add the region at the right hand side
1752   if (_inc_cset_tail == NULL) {
1753     assert(_inc_cset_head == NULL, "invariant");
1754     _inc_cset_head = hr;
1755   } else {
1756     _inc_cset_tail->set_next_in_collection_set(hr);
1757   }
1758   _inc_cset_tail = hr;
1759 }
1760 


1885   YoungList* young_list = _g1->young_list();
1886   finalize_incremental_cset_building();
1887 
1888   guarantee(target_pause_time_ms > 0.0,
1889             err_msg("target_pause_time_ms = %1.6lf should be positive",
1890                     target_pause_time_ms));
1891   guarantee(_collection_set == NULL, "Precondition");
1892 
1893   double base_time_ms = predict_base_elapsed_time_ms(_pending_cards);
1894   double predicted_pause_time_ms = base_time_ms;
1895   double time_remaining_ms = MAX2(target_pause_time_ms - base_time_ms, 0.0);
1896 
1897   ergo_verbose4(ErgoCSetConstruction | ErgoHigh,
1898                 "start choosing CSet",
1899                 ergo_format_size("_pending_cards")
1900                 ergo_format_ms("predicted base time")
1901                 ergo_format_ms("remaining time")
1902                 ergo_format_ms("target pause time"),
1903                 _pending_cards, base_time_ms, time_remaining_ms, target_pause_time_ms);
1904 
 905   collector_state()->set_last_gc_was_young(collector_state()->gcs_are_young());
1906 
1907   if (collector_state()->last_gc_was_young()) {
1908     _trace_young_gen_time_data.increment_young_collection_count();
1909   } else {
1910     _trace_young_gen_time_data.increment_mixed_collection_count();
1911   }
1912 
 913   // The young list is laid out so that the survivor regions from the
 914   // previous pause are appended to the RHS of the young list, i.e.
1915   //   [Newly Young Regions ++ Survivors from last pause].
1916 
1917   uint survivor_region_length = young_list->survivor_length();
1918   uint eden_region_length = young_list->eden_length();
1919   init_cset_region_lengths(eden_region_length, survivor_region_length);
1920 
1921   HeapRegion* hr = young_list->first_survivor_region();
1922   while (hr != NULL) {
1923     assert(hr->is_survivor(), "badly formed young list");
1924     // There is a convention that all the young regions in the CSet
1925     // are tagged as "eden", so we do this for the survivors here. We
1926     // use the special set_eden_pre_gc() as it doesn't check that the
1927     // region is free (which is not the case here).


1938   predicted_pause_time_ms += _inc_cset_predicted_elapsed_time_ms;
1939 
1940   ergo_verbose3(ErgoCSetConstruction | ErgoHigh,
1941                 "add young regions to CSet",
1942                 ergo_format_region("eden")
1943                 ergo_format_region("survivors")
1944                 ergo_format_ms("predicted young region time"),
1945                 eden_region_length, survivor_region_length,
1946                 _inc_cset_predicted_elapsed_time_ms);
1947 
1948   // The number of recorded young regions is the incremental
1949   // collection set's current size
1950   set_recorded_rs_lengths(_inc_cset_recorded_rs_lengths);
1951 
1952   double young_end_time_sec = os::elapsedTime();
1953   phase_times()->record_young_cset_choice_time_ms((young_end_time_sec - young_start_time_sec) * 1000.0);
1954 
1955   // Set the start of the non-young choice time.
1956   double non_young_start_time_sec = young_end_time_sec;
1957 
1958   if (!collector_state()->gcs_are_young()) {
1959     CollectionSetChooser* cset_chooser = _collectionSetChooser;
1960     cset_chooser->verify();
1961     const uint min_old_cset_length = calc_min_old_cset_length();
1962     const uint max_old_cset_length = calc_max_old_cset_length();
1963 
1964     uint expensive_region_num = 0;
1965     bool check_time_remaining = adaptive_young_list_length();
1966 
1967     HeapRegion* hr = cset_chooser->peek();
1968     while (hr != NULL) {
1969       if (old_cset_region_length() >= max_old_cset_length) {
1970         // Added maximum number of old regions to the CSet.
1971         ergo_verbose2(ErgoCSetConstruction,
1972                       "finish adding old regions to CSet",
1973                       ergo_format_reason("old CSet region num reached max")
1974                       ergo_format_region("old")
1975                       ergo_format_region("max"),
1976                       old_cset_region_length(), max_old_cset_length);
1977         break;
1978       }


1984       double reclaimable_perc = reclaimable_bytes_perc(reclaimable_bytes);
1985       double threshold = (double) G1HeapWastePercent;
1986       if (reclaimable_perc <= threshold) {
1987         // We've added enough old regions that the amount of uncollected
1988         // reclaimable space is at or below the waste threshold. Stop
1989         // adding old regions to the CSet.
1990         ergo_verbose5(ErgoCSetConstruction,
1991                       "finish adding old regions to CSet",
1992                       ergo_format_reason("reclaimable percentage not over threshold")
1993                       ergo_format_region("old")
1994                       ergo_format_region("max")
1995                       ergo_format_byte_perc("reclaimable")
1996                       ergo_format_perc("threshold"),
1997                       old_cset_region_length(),
1998                       max_old_cset_length,
1999                       reclaimable_bytes,
2000                       reclaimable_perc, threshold);
2001         break;
2002       }
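           // Illustrative numbers: with 40 MB reclaimable on a 1 GB heap,
           // reclaimable_perc is about 3.9%, under the default
           // G1HeapWastePercent of 5, so old-region selection stops here.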
2003 
2004       double predicted_time_ms = predict_region_elapsed_time_ms(hr, collector_state()->gcs_are_young());
2005       if (check_time_remaining) {
2006         if (predicted_time_ms > time_remaining_ms) {
2007           // Too expensive for the current CSet.
2008 
2009           if (old_cset_region_length() >= min_old_cset_length) {
2010             // We have added the minimum number of old regions to the CSet,
2011             // we are done with this CSet.
2012             ergo_verbose4(ErgoCSetConstruction,
2013                           "finish adding old regions to CSet",
2014                           ergo_format_reason("predicted time is too high")
2015                           ergo_format_ms("predicted time")
2016                           ergo_format_ms("remaining time")
2017                           ergo_format_region("old")
2018                           ergo_format_region("min"),
2019                           predicted_time_ms, time_remaining_ms,
2020                           old_cset_region_length(), min_old_cset_length);
2021             break;
2022           }
2023 
2024           // We'll add it anyway given that we haven't reached the

