951 assert(_cur_collection_pause_used_regions_at_start >= cset_region_length(),
952 "otherwise, the subtraction below does not make sense");
953 size_t rs_size =
954 _cur_collection_pause_used_regions_at_start - cset_region_length();
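    // (rs_size is simply the number of regions that were in use at the
    // start of the pause but were not in the collection set, i.e. the
    // regions that survive this pause uncollected.)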
955 size_t cur_used_bytes = _g1->used();
956 assert(cur_used_bytes == _g1->recalculate_used(), "It should!");
957 bool last_pause_included_initial_mark = false;
958 bool update_stats = !_g1->evacuation_failed();
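    // After an evacuation failure the pause timings are not representative
    // (handling to-space exhaustion is expensive), so they are kept out of
    // the prediction sequences updated under update_stats below.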
959
960 #ifndef PRODUCT
961 if (G1YoungSurvRateVerbose) {
962 gclog_or_tty->cr();
963 _short_lived_surv_rate_group->print();
964 // do that for any other surv rate groups too
965 }
966 #endif // PRODUCT
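    // (Debug-only: G1YoungSurvRateVerbose dumps the per-age survival-rate
    // table the policy uses to predict how much of the young gen gets
    // copied during a pause.)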
967
968 last_pause_included_initial_mark = collector_state()->during_initial_mark_pause();
969 if (last_pause_included_initial_mark) {
970 record_concurrent_mark_init_end(0.0);
971 } else if (need_to_start_conc_mark("end of GC")) {
972 // Note: this might have already been set, if during the last
973 // pause we decided to start a cycle but at the beginning of
974 // this pause we decided to postpone it. That's OK.
975 collector_state()->set_initiate_conc_mark_if_possible(true);
976 }
977
978 _mmu_tracker->add_pause(end_time_sec - pause_time_ms/1000.0, end_time_sec);
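    // The MMU (minimum mutator utilization) tracker records the pause as the
    // interval [end - pause, end]; the start is reconstructed from the end
    // timestamp (seconds) and the pause duration (milliseconds).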
979
980 if (update_stats) {
981 _trace_young_gen_time_data.record_end_collection(pause_time_ms, phase_times());
982 // this is where we update the allocation rate of the application
983 double app_time_ms =
984 (phase_times()->cur_collection_start_sec() * 1000.0 - _prev_collection_pause_end_ms);
985 if (app_time_ms < MIN_TIMER_GRANULARITY) {
986 // This usually happens due to the timer not having the required
987 // granularity. Some Linuxes are the usual culprits.
988 // We'll just set it to something (arbitrarily) small.
989 app_time_ms = 1.0;
990 }
991 // We maintain the invariant that all objects allocated by mutator
992 // threads will be allocated out of eden regions. So, we can use
993 // the number of eden regions allocated since the previous GC to
994 // calculate the application's allocation rate. The only exception
995 // to that is humongous objects that are allocated separately. But
1040 if (collector_state()->last_young_gc()) {
1041 // This is supposed to be the "last young GC" before we start
1042 // doing mixed GCs. Here we decide whether to start mixed GCs or not.
1043 assert(!last_pause_included_initial_mark, "The last young GC is not allowed to be an initial mark GC");
1044
1045 if (next_gc_should_be_mixed("start mixed GCs",
1046 "do not start mixed GCs")) {
1047 collector_state()->set_gcs_are_young(false);
1048 }
1049
1050 collector_state()->set_last_young_gc(false);
1051 }
1052
1053 if (!collector_state()->last_gc_was_young()) {
1054 // This is a mixed GC. Here we decide whether to continue doing
1055 // mixed GCs or not.
1056
1057 if (!next_gc_should_be_mixed("continue mixed GCs",
1058 "do not continue mixed GCs")) {
1059 collector_state()->set_gcs_are_young(true);
1060 }
1061 }
1062
1063 _short_lived_surv_rate_group->start_adding_regions();
1064 // Do that for any other surv rate groups
1065
1066 if (update_stats) {
1067 double cost_per_card_ms = 0.0;
1068 double cost_scan_hcc = phase_times()->average_time_ms(G1GCPhaseTimes::ScanHCC);
1069 if (_pending_cards > 0) {
1070 cost_per_card_ms = (phase_times()->average_time_ms(G1GCPhaseTimes::UpdateRS) - cost_scan_hcc) / (double) _pending_cards;
1071 _cost_per_card_ms_seq->add(cost_per_card_ms);
1072 }
1073 _cost_scan_hcc_seq->add(cost_scan_hcc);
1074
1075 double cost_per_entry_ms = 0.0;
1076 if (cards_scanned > 10) {
1077 cost_per_entry_ms = phase_times()->average_time_ms(G1GCPhaseTimes::ScanRS) / (double) cards_scanned;
1078 if (collector_state()->last_gc_was_young()) {
1079 _cost_per_entry_ms_seq->add(cost_per_entry_ms);
1915 st->print_cr("\nCollection_set:");
1916 HeapRegion* csr = list_head;
1917 while (csr != NULL) {
1918 HeapRegion* next = csr->next_in_collection_set();
1919 assert(csr->in_collection_set(), "bad CS");
1920 st->print_cr(" " HR_FORMAT ", P: " PTR_FORMAT ", N: " PTR_FORMAT ", age: %4d",
1921 HR_FORMAT_PARAMS(csr),
1922 p2i(csr->prev_top_at_mark_start()), p2i(csr->next_top_at_mark_start()),
1923 csr->age_in_surv_rate_group_cond());
1924 csr = next;
1925 }
1926 }
1927 #endif // !PRODUCT
1928
1929 double G1CollectorPolicy::reclaimable_bytes_perc(size_t reclaimable_bytes) const {
1930 // Returns the given amount of reclaimable bytes (that represents
1931 // the amount of reclaimable space still to be collected) as a
1932 // percentage of the current heap capacity.
1933 size_t capacity_bytes = _g1->capacity();
1934 return (double) reclaimable_bytes * 100.0 / (double) capacity_bytes;
1935 }
1936
1937 bool G1CollectorPolicy::next_gc_should_be_mixed(const char* true_action_str,
1938 const char* false_action_str) const {
1939 CollectionSetChooser* cset_chooser = _collectionSetChooser;
1940 if (cset_chooser->is_empty()) {
1941 ergo_verbose0(ErgoMixedGCs,
1942 false_action_str,
1943 ergo_format_reason("candidate old regions not available"));
1944 return false;
1945 }
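    // (The chooser is repopulated with candidate old regions at the end of a
    // marking cycle; when it is empty a mixed GC would have nothing extra to
    // collect, hence the early exit above.)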
1946
1947 // Is the amount of uncollected reclaimable space above G1HeapWastePercent?
1948 size_t reclaimable_bytes = cset_chooser->remaining_reclaimable_bytes();
1949 double reclaimable_perc = reclaimable_bytes_perc(reclaimable_bytes);
1950 double threshold = (double) G1HeapWastePercent;
1951 if (reclaimable_perc <= threshold) {
1952 ergo_verbose4(ErgoMixedGCs,
1953 false_action_str,
1954 ergo_format_reason("reclaimable percentage not over threshold")
// ---------------------------------------------------------------------------
// Updated version of the same code: the initiate-concurrent-mark decision is
// factored out into the new helper G1CollectorPolicy::maybe_start_marking(),
// which is also called when mixed GCs stop.
// ---------------------------------------------------------------------------
951 assert(_cur_collection_pause_used_regions_at_start >= cset_region_length(),
952 "otherwise, the subtraction below does not make sense");
953 size_t rs_size =
954 _cur_collection_pause_used_regions_at_start - cset_region_length();
955 size_t cur_used_bytes = _g1->used();
956 assert(cur_used_bytes == _g1->recalculate_used(), "It should!");
957 bool last_pause_included_initial_mark = false;
958 bool update_stats = !_g1->evacuation_failed();
959
960 #ifndef PRODUCT
961 if (G1YoungSurvRateVerbose) {
962 gclog_or_tty->cr();
963 _short_lived_surv_rate_group->print();
964 // do that for any other surv rate groups too
965 }
966 #endif // PRODUCT
967
968 last_pause_included_initial_mark = collector_state()->during_initial_mark_pause();
969 if (last_pause_included_initial_mark) {
970 record_concurrent_mark_init_end(0.0);
971 } else {
972 maybe_start_marking();
973 }
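    // (An initial-mark pause has just started a marking cycle, so the branch
    // above does not ask whether another one should be initiated.)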
974
975 _mmu_tracker->add_pause(end_time_sec - pause_time_ms/1000.0, end_time_sec);
976
977 if (update_stats) {
978 _trace_young_gen_time_data.record_end_collection(pause_time_ms, phase_times());
979 // this is where we update the allocation rate of the application
980 double app_time_ms =
981 (phase_times()->cur_collection_start_sec() * 1000.0 - _prev_collection_pause_end_ms);
982 if (app_time_ms < MIN_TIMER_GRANULARITY) {
983 // This usually happens due to the timer not having the required
984 // granularity. Some Linuxes are the usual culprits.
985 // We'll just set it to something (arbitrarily) small.
986 app_time_ms = 1.0;
987 }
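    // (app_time_ms measures mutator time only: from the end of the previous
    // pause to the start of this one. The allocation rate sampled from it is
    // therefore "eden regions per ms of application time".)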
988 // We maintain the invariant that all objects allocated by mutator
989 // threads will be allocated out of eden regions. So, we can use
990 // the number of eden regions allocated since the previous GC to
991 // calculate the application's allocation rate. The only exception
992 // to that is humongous objects that are allocated separately. But
1037 if (collector_state()->last_young_gc()) {
1038 // This is supposed to be the "last young GC" before we start
1039 // doing mixed GCs. Here we decide whether to start mixed GCs or not.
1040 assert(!last_pause_included_initial_mark, "The last young GC is not allowed to be an initial mark GC");
1041
1042 if (next_gc_should_be_mixed("start mixed GCs",
1043 "do not start mixed GCs")) {
1044 collector_state()->set_gcs_are_young(false);
1045 }
1046
1047 collector_state()->set_last_young_gc(false);
1048 }
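    // (set_last_young_gc(false) above runs whether or not mixed GCs start:
    // the decision is made at most once per marking cycle.)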
1049
1050 if (!collector_state()->last_gc_was_young()) {
1051 // This is a mixed GC. Here we decide whether to continue doing
1052 // mixed GCs or not.
1053
1054 if (!next_gc_should_be_mixed("continue mixed GCs",
1055 "do not continue mixed GCs")) {
1056 collector_state()->set_gcs_are_young(true);
1057
1058 maybe_start_marking();
1059 }
1060 }
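    // Once the mixed phase ends, the heap may already be full enough to
    // warrant another marking cycle, hence the maybe_start_marking() call
    // above rather than waiting for a later pause to notice.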
1061
1062 _short_lived_surv_rate_group->start_adding_regions();
1063 // Do that for any other surv rate groups
1064
1065 if (update_stats) {
1066 double cost_per_card_ms = 0.0;
1067 double cost_scan_hcc = phase_times()->average_time_ms(G1GCPhaseTimes::ScanHCC);
1068 if (_pending_cards > 0) {
1069 cost_per_card_ms = (phase_times()->average_time_ms(G1GCPhaseTimes::UpdateRS) - cost_scan_hcc) / (double) _pending_cards;
1070 _cost_per_card_ms_seq->add(cost_per_card_ms);
1071 }
1072 _cost_scan_hcc_seq->add(cost_scan_hcc);
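    // Per-card cost model: the hot-card-cache (HCC) scan time is subtracted
    // from the UpdateRS phase time before dividing by the pending cards, so
    // the HCC cost and the per-card refinement cost are tracked separately.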
1073
1074 double cost_per_entry_ms = 0.0;
1075 if (cards_scanned > 10) {
1076 cost_per_entry_ms = phase_times()->average_time_ms(G1GCPhaseTimes::ScanRS) / (double) cards_scanned;
1077 if (collector_state()->last_gc_was_young()) {
1078 _cost_per_entry_ms_seq->add(cost_per_entry_ms);
1914 st->print_cr("\nCollection_set:");
1915 HeapRegion* csr = list_head;
1916 while (csr != NULL) {
1917 HeapRegion* next = csr->next_in_collection_set();
1918 assert(csr->in_collection_set(), "bad CS");
1919 st->print_cr(" " HR_FORMAT ", P: " PTR_FORMAT ", N: " PTR_FORMAT ", age: %4d",
1920 HR_FORMAT_PARAMS(csr),
1921 p2i(csr->prev_top_at_mark_start()), p2i(csr->next_top_at_mark_start()),
1922 csr->age_in_surv_rate_group_cond());
1923 csr = next;
1924 }
1925 }
1926 #endif // !PRODUCT
1927
1928 double G1CollectorPolicy::reclaimable_bytes_perc(size_t reclaimable_bytes) const {
1929 // Returns the given amount of reclaimable bytes (that represents
1930 // the amount of reclaimable space still to be collected) as a
1931 // percentage of the current heap capacity.
1932 size_t capacity_bytes = _g1->capacity();
1933 return (double) reclaimable_bytes * 100.0 / (double) capacity_bytes;
1934 }
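    // Worked example (hypothetical numbers): 100 MB still reclaimable in a
    // 2048 MB heap gives 100.0 * 100.0 / 2048.0 ~= 4.9, which is below the
    // default G1HeapWastePercent of 5, so next_gc_should_be_mixed() below
    // would return false.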
1935
1936 void G1CollectorPolicy::maybe_start_marking() {
1937 if (need_to_start_conc_mark("end of GC")) {
1938 // Note: this might have already been set, if during the last
1939 // pause we decided to start a cycle but at the beginning of
1940 // this pause we decided to postpone it. That's OK.
1941 collector_state()->set_initiate_conc_mark_if_possible(true);
1942 }
1943 }
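    // (Both call sites above, the non-initial-mark path at the end of a pause
    // and the end-of-mixed-GCs path, now funnel through this one helper.)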
1944
1945 bool G1CollectorPolicy::next_gc_should_be_mixed(const char* true_action_str,
1946 const char* false_action_str) const {
1947 CollectionSetChooser* cset_chooser = _collectionSetChooser;
1948 if (cset_chooser->is_empty()) {
1949 ergo_verbose0(ErgoMixedGCs,
1950 false_action_str,
1951 ergo_format_reason("candidate old regions not available"));
1952 return false;
1953 }
1954
1955 // Is the amount of uncollected reclaimable space above G1HeapWastePercent?
1956 size_t reclaimable_bytes = cset_chooser->remaining_reclaimable_bytes();
1957 double reclaimable_perc = reclaimable_bytes_perc(reclaimable_bytes);
1958 double threshold = (double) G1HeapWastePercent;
1959 if (reclaimable_perc <= threshold) {
1960 ergo_verbose4(ErgoMixedGCs,
1961 false_action_str,
1962 ergo_format_reason("reclaimable percentage not over threshold")