1044
// Verification run immediately before a full collection: checks the used-size
// bookkeeping, optional region sets, heap contents, and mark bitmaps.
void G1CollectedHeap::verify_before_full_collection(bool explicit_gc) {
  // A user-requested GC cause implies the collection was explicitly requested.
  assert(!GCCause::is_user_requested_gc(gc_cause()) || explicit_gc, "invariant");
  assert_used_and_recalculate_used_equal(this);
  _verifier->verify_region_sets_optional();
  _verifier->verify_before_gc(G1HeapVerifier::G1VerifyFull);
  _verifier->check_bitmaps("Full GC Start");
}
1052
// Restore the heap to a state in which mutators can run again after a full
// collection: purge dead metadata, rebuild region sets and code-root
// structures, resize the heap, and re-enable mutator allocation.
void G1CollectedHeap::prepare_heap_for_mutators() {
  hrm()->prepare_for_full_collection_end();

  // Delete metaspaces for unloaded class loaders and clean up loader_data graph
  ClassLoaderDataGraph::purge();
  MetaspaceUtils::verify_metrics();

  // Prepare heap for normal collections.
  assert(num_free_regions() == 0, "we should not have added any free regions");
  rebuild_region_sets(false /* free_list_only */);
  abort_refinement();
  resize_heap_if_necessary();

  // Rebuild the strong code root lists for each region
  rebuild_strong_code_roots();

  // Purge code root memory
  purge_code_root_memory();

  // Start a new incremental collection set for the next pause
  start_new_collection_set();

  // Re-enable mutator allocation regions now that the region sets are rebuilt.
  _allocator->init_mutator_alloc_regions();

  // Post collection state updates.
  MetaspaceGC::compute_new_size();
}
1080
1081 void G1CollectedHeap::abort_refinement() {
1082 if (_hot_card_cache->use_cache()) {
1083 _hot_card_cache->reset_hot_cache();
1084 }
1148
1149 G1FullCollector collector(this, explicit_gc, do_clear_all_soft_refs);
1150 GCTraceTime(Info, gc) tm("Pause Full", NULL, gc_cause(), true);
1151
1152 collector.prepare_collection();
1153 collector.collect();
1154 collector.complete_collection();
1155
1156 // Full collection was successfully completed.
1157 return true;
1158 }
1159
// CollectedHeap API entry point for a full collection. Always passes
// explicit_gc = true to the two-argument overload; the success/failure
// result is deliberately dropped (see comment below).
void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) {
  // Currently, there is no facility in the do_full_collection(bool) API to notify
  // the caller that the collection did not succeed (e.g., because it was locked
  // out by the GC locker). So, right now, we'll ignore the return value.
  bool dummy = do_full_collection(true, /* explicit_gc */
                                  clear_all_soft_refs);
}
1167
1168 void G1CollectedHeap::resize_heap_if_necessary() {
1169 assert_at_safepoint_on_vm_thread();
1170
1171 // Capacity, free and used after the GC counted as full regions to
1172 // include the waste in the following calculations.
1173 const size_t capacity_after_gc = capacity();
1174 const size_t used_after_gc = capacity_after_gc - unused_committed_regions_in_bytes();
1175
1176 // This is enforced in arguments.cpp.
1177 assert(MinHeapFreeRatio <= MaxHeapFreeRatio,
1178 "otherwise the code below doesn't make sense");
1179
1180 // We don't have floating point command-line arguments
1181 const double minimum_free_percentage = (double) MinHeapFreeRatio / 100.0;
1182 const double maximum_used_percentage = 1.0 - minimum_free_percentage;
1183 const double maximum_free_percentage = (double) MaxHeapFreeRatio / 100.0;
1184 const double minimum_used_percentage = 1.0 - maximum_free_percentage;
1185
1186 // We have to be careful here as these two calculations can overflow
1187 // 32-bit size_t's.
1188 double used_after_gc_d = (double) used_after_gc;
1189 double minimum_desired_capacity_d = used_after_gc_d / maximum_used_percentage;
1190 double maximum_desired_capacity_d = used_after_gc_d / minimum_used_percentage;
1191
1192 // Let's make sure that they are both under the max heap size, which
1193 // by default will make them fit into a size_t.
1194 double desired_capacity_upper_bound = (double) MaxHeapSize;
1195 minimum_desired_capacity_d = MIN2(minimum_desired_capacity_d,
1196 desired_capacity_upper_bound);
1197 maximum_desired_capacity_d = MIN2(maximum_desired_capacity_d,
1198 desired_capacity_upper_bound);
1199
1200 // We can now safely turn them into size_t's.
1201 size_t minimum_desired_capacity = (size_t) minimum_desired_capacity_d;
1202 size_t maximum_desired_capacity = (size_t) maximum_desired_capacity_d;
1203
1204 // This assert only makes sense here, before we adjust them
1205 // with respect to the min and max heap size.
1206 assert(minimum_desired_capacity <= maximum_desired_capacity,
1207 "minimum_desired_capacity = " SIZE_FORMAT ", "
1208 "maximum_desired_capacity = " SIZE_FORMAT,
1209 minimum_desired_capacity, maximum_desired_capacity);
1210
1211 // Should not be greater than the heap max size. No need to adjust
1212 // it with respect to the heap min size as it's a lower bound (i.e.,
1213 // we'll try to make the capacity larger than it, not smaller).
1214 minimum_desired_capacity = MIN2(minimum_desired_capacity, MaxHeapSize);
1215 // Should not be less than the heap min size. No need to adjust it
1216 // with respect to the heap max size as it's an upper bound (i.e.,
1217 // we'll try to make the capacity smaller than it, not greater).
1218 maximum_desired_capacity = MAX2(maximum_desired_capacity, MinHeapSize);
1219
1220 if (capacity_after_gc < minimum_desired_capacity) {
1221 // Don't expand unless it's significant
1222 size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;
2407 }
2408
// For G1 TLABs should not contain humongous objects, so the maximum TLAB size
// must be equal to the humongous object limit.
size_t G1CollectedHeap::max_tlab_size() const {
  // Align down so the returned word size respects the minimum object alignment.
  return align_down(_humongous_object_threshold_in_words, MinObjAlignment);
}
2414
// Delegates to the allocator; the Thread argument is intentionally ignored.
size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
  return _allocator->unsafe_max_tlab_alloc();
}
2418
// Maximum heap capacity in bytes: the region manager's maximum expandable
// region count times the region size.
size_t G1CollectedHeap::max_capacity() const {
  return _hrm->max_expandable_length() * HeapRegion::GrainBytes;
}
2422
// Total reserved heap size in bytes: the region manager's maximum region
// count times the region size.
size_t G1CollectedHeap::max_reserved_capacity() const {
  return _hrm->max_length() * HeapRegion::GrainBytes;
}
2426
// Milliseconds elapsed since the end of the last collection pause.
// Clamped to zero (with a warning) if the subtraction comes out negative.
jlong G1CollectedHeap::millis_since_last_gc() {
  // See the notes in GenCollectedHeap::millis_since_last_gc()
  // for more information about the implementation.
  jlong ret_val = (os::javaTimeNanos() / NANOSECS_PER_MILLISEC) -
                  _policy->collection_pause_end_millis();
  if (ret_val < 0) {
    // NOTE(review): presumably the two time readings are not taken atomically,
    // so a small negative difference is possible — see the referenced notes.
    log_warning(gc)("millis_since_last_gc() would return : " JLONG_FORMAT
                    ". returning zero instead.", ret_val);
    return 0;
  }
  return ret_val;
}
2439
// Request deduplication of the given String object.
// No-op unless G1 string deduplication is enabled.
void G1CollectedHeap::deduplicate_string(oop str) {
  assert(java_lang_String::is_instance(str), "invariant");

  if (G1StringDedup::is_enabled()) {
    G1StringDedup::deduplicate(str);
  }
}
2931 log_info(gc, verify)("[Verifying RemSets before GC]");
2932 VerifyRegionRemSetClosure v_cl;
2933 heap_region_iterate(&v_cl);
2934 }
2935 _verifier->verify_before_gc(type);
2936 _verifier->check_bitmaps("GC Start");
2937 verify_numa_regions("GC Start");
2938 }
2939
// Verification run after a young collection: optionally walks all regions to
// verify remembered sets, then runs the heap verifier, bitmap checks, and
// NUMA region checks.
void G1CollectedHeap::verify_after_young_collection(G1HeapVerifier::G1VerifyType type) {
  if (VerifyRememberedSets) {
    log_info(gc, verify)("[Verifying RemSets after GC]");
    VerifyRegionRemSetClosure v_cl;
    heap_region_iterate(&v_cl);
  }
  _verifier->verify_after_gc(type);
  _verifier->check_bitmaps("GC End");
  verify_numa_regions("GC End");
}
2950
// Expand the heap after a young collection if the sizing policy requests it,
// and record the time spent expanding.
void G1CollectedHeap::expand_heap_after_young_collection(){
  size_t expand_bytes = _heap_sizing_policy->expansion_amount();
  if (expand_bytes > 0) {
    // No need for an ergo logging here,
    // expansion_amount() does this when it returns a value > 0.
    double expand_ms;
    if (!expand(expand_bytes, _workers, &expand_ms)) {
      // We failed to expand the heap. Cannot do anything about it.
    }
    // NOTE(review): assumes expand() sets expand_ms even on failure — confirm.
    phase_times()->record_expand_heap_time(expand_ms);
  }
}
2963
2964 const char* G1CollectedHeap::young_gc_name() const {
2965 if (collector_state()->in_initial_mark_gc()) {
2966 return "Pause Young (Concurrent Start)";
2967 } else if (collector_state()->in_young_only_phase()) {
2968 if (collector_state()->in_young_gc_before_mixed()) {
2969 return "Pause Young (Prepare Mixed)";
2970 } else {
2971 return "Pause Young (Normal)";
2972 }
2973 } else {
2974 return "Pause Young (Mixed)";
2975 }
2976 }
2977
2978 bool G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
2979 assert_at_safepoint_on_vm_thread();
2980 guarantee(!is_gc_active(), "collection is not reentrant");
3109
3110 start_new_collection_set();
3111
3112 _survivor_evac_stats.adjust_desired_plab_sz();
3113 _old_evac_stats.adjust_desired_plab_sz();
3114
3115 if (should_start_conc_mark) {
3116 // We have to do this before we notify the CM threads that
3117 // they can start working to make sure that all the
3118 // appropriate initialization is done on the CM object.
3119 concurrent_mark()->post_initial_mark();
3120 // Note that we don't actually trigger the CM thread at
3121 // this point. We do that later when we're sure that
3122 // the current thread has completed its logging output.
3123 }
3124
3125 allocate_dummy_regions();
3126
3127 _allocator->init_mutator_alloc_regions();
3128
3129 expand_heap_after_young_collection();
3130
3131 double sample_end_time_sec = os::elapsedTime();
3132 double pause_time_ms = (sample_end_time_sec - sample_start_time_sec) * MILLIUNITS;
3133 policy()->record_collection_pause_end(pause_time_ms);
3134 }
3135
3136 verify_after_young_collection(verify_type);
3137
3138 gc_epilogue(false);
3139 }
3140
3141 // Print the remainder of the GC log output.
3142 if (evacuation_failed()) {
3143 log_info(gc)("To-space exhausted");
3144 }
3145
3146 policy()->print_phases();
3147 heap_transition.print();
3148
3149 _hrm->verify_optional();
|
1044
// Verification run immediately before a full collection: checks the used-size
// bookkeeping, optional region sets, heap contents, and mark bitmaps.
void G1CollectedHeap::verify_before_full_collection(bool explicit_gc) {
  // A user-requested GC cause implies the collection was explicitly requested.
  assert(!GCCause::is_user_requested_gc(gc_cause()) || explicit_gc, "invariant");
  assert_used_and_recalculate_used_equal(this);
  _verifier->verify_region_sets_optional();
  _verifier->verify_before_gc(G1HeapVerifier::G1VerifyFull);
  _verifier->check_bitmaps("Full GC Start");
}
1052
// Restore the heap to a state in which mutators can run again after a full
// collection: purge dead metadata, rebuild region sets and code-root
// structures, resize the heap, and re-enable mutator allocation.
void G1CollectedHeap::prepare_heap_for_mutators() {
  hrm()->prepare_for_full_collection_end();

  // Delete metaspaces for unloaded class loaders and clean up loader_data graph
  ClassLoaderDataGraph::purge();
  MetaspaceUtils::verify_metrics();

  // Prepare heap for normal collections.
  assert(num_free_regions() == 0, "we should not have added any free regions");
  rebuild_region_sets(false /* free_list_only */);
  abort_refinement();

  resize_heap_after_full_gc();

  // Rebuild the strong code root lists for each region
  rebuild_strong_code_roots();

  // Purge code root memory
  purge_code_root_memory();

  // Start a new incremental collection set for the next pause
  start_new_collection_set();

  // Re-enable mutator allocation regions now that the region sets are rebuilt.
  _allocator->init_mutator_alloc_regions();

  // Post collection state updates.
  MetaspaceGC::compute_new_size();
}
1081
1082 void G1CollectedHeap::abort_refinement() {
1083 if (_hot_card_cache->use_cache()) {
1084 _hot_card_cache->reset_hot_cache();
1085 }
1149
1150 G1FullCollector collector(this, explicit_gc, do_clear_all_soft_refs);
1151 GCTraceTime(Info, gc) tm("Pause Full", NULL, gc_cause(), true);
1152
1153 collector.prepare_collection();
1154 collector.collect();
1155 collector.complete_collection();
1156
1157 // Full collection was successfully completed.
1158 return true;
1159 }
1160
// CollectedHeap API entry point for a full collection. Always passes
// explicit_gc = true to the two-argument overload; the success/failure
// result is deliberately dropped (see comment below).
void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) {
  // Currently, there is no facility in the do_full_collection(bool) API to notify
  // the caller that the collection did not succeed (e.g., because it was locked
  // out by the GC locker). So, right now, we'll ignore the return value.
  bool dummy = do_full_collection(true, /* explicit_gc */
                                  clear_all_soft_refs);
}
1168
1169 void G1CollectedHeap::resize_heap_after_full_gc() {
1170 assert_at_safepoint_on_vm_thread();
1171 assert(collector_state()->in_full_gc(), "Must be");
1172
1173 // Capacity, free and used after the GC counted as full regions to
1174 // include the waste in the following calculations.
1175 const size_t capacity_after_gc = capacity();
1176 const size_t used_after_gc = capacity_after_gc - unused_committed_regions_in_bytes();
1177
1178 size_t minimum_desired_capacity = _heap_sizing_policy->target_heap_capacity(used_after_gc, MinHeapFreeRatio);
1179 size_t maximum_desired_capacity = _heap_sizing_policy->target_heap_capacity(used_after_gc, MaxHeapFreeRatio);
1180
1181 // This assert only makes sense here, before we adjust them
1182 // with respect to the min and max heap size.
1183 assert(minimum_desired_capacity <= maximum_desired_capacity,
1184 "minimum_desired_capacity = " SIZE_FORMAT ", "
1185 "maximum_desired_capacity = " SIZE_FORMAT,
1186 minimum_desired_capacity, maximum_desired_capacity);
1187
1188 // Should not be greater than the heap max size. No need to adjust
1189 // it with respect to the heap min size as it's a lower bound (i.e.,
1190 // we'll try to make the capacity larger than it, not smaller).
1191 minimum_desired_capacity = MIN2(minimum_desired_capacity, MaxHeapSize);
1192 // Should not be less than the heap min size. No need to adjust it
1193 // with respect to the heap max size as it's an upper bound (i.e.,
1194 // we'll try to make the capacity smaller than it, not greater).
1195 maximum_desired_capacity = MAX2(maximum_desired_capacity, MinHeapSize);
1196
1197 if (capacity_after_gc < minimum_desired_capacity) {
1198 // Don't expand unless it's significant
1199 size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;
2384 }
2385
// For G1 TLABs should not contain humongous objects, so the maximum TLAB size
// must be equal to the humongous object limit.
size_t G1CollectedHeap::max_tlab_size() const {
  // Align down so the returned word size respects the minimum object alignment.
  return align_down(_humongous_object_threshold_in_words, MinObjAlignment);
}
2391
// Delegates to the allocator; the Thread argument is intentionally ignored.
size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
  return _allocator->unsafe_max_tlab_alloc();
}
2395
// Maximum heap capacity in bytes: the region manager's maximum expandable
// region count times the region size.
size_t G1CollectedHeap::max_capacity() const {
  return _hrm->max_expandable_length() * HeapRegion::GrainBytes;
}
2399
// Total reserved heap size in bytes: the region manager's maximum region
// count times the region size.
size_t G1CollectedHeap::max_reserved_capacity() const {
  return _hrm->max_length() * HeapRegion::GrainBytes;
}
2403
// Current soft maximum heap size: SoftMaxHeapSize aligned up to the heap
// alignment, clamped between the minimum heap size and the maximum capacity.
size_t G1CollectedHeap::soft_max_capacity() const {
  return clamp(align_up(SoftMaxHeapSize, HeapAlignment), MinHeapSize, max_capacity());
}
2407
// Milliseconds elapsed since the end of the last collection pause.
// Clamped to zero (with a warning) if the subtraction comes out negative.
jlong G1CollectedHeap::millis_since_last_gc() {
  // See the notes in GenCollectedHeap::millis_since_last_gc()
  // for more information about the implementation.
  jlong ret_val = (os::javaTimeNanos() / NANOSECS_PER_MILLISEC) -
                  _policy->collection_pause_end_millis();
  if (ret_val < 0) {
    // NOTE(review): presumably the two time readings are not taken atomically,
    // so a small negative difference is possible — see the referenced notes.
    log_warning(gc)("millis_since_last_gc() would return : " JLONG_FORMAT
                    ". returning zero instead.", ret_val);
    return 0;
  }
  return ret_val;
}
2420
// Request deduplication of the given String object.
// No-op unless G1 string deduplication is enabled.
void G1CollectedHeap::deduplicate_string(oop str) {
  assert(java_lang_String::is_instance(str), "invariant");

  if (G1StringDedup::is_enabled()) {
    G1StringDedup::deduplicate(str);
  }
}
2912 log_info(gc, verify)("[Verifying RemSets before GC]");
2913 VerifyRegionRemSetClosure v_cl;
2914 heap_region_iterate(&v_cl);
2915 }
2916 _verifier->verify_before_gc(type);
2917 _verifier->check_bitmaps("GC Start");
2918 verify_numa_regions("GC Start");
2919 }
2920
// Verification run after a young collection: optionally walks all regions to
// verify remembered sets, then runs the heap verifier, bitmap checks, and
// NUMA region checks.
void G1CollectedHeap::verify_after_young_collection(G1HeapVerifier::G1VerifyType type) {
  if (VerifyRememberedSets) {
    log_info(gc, verify)("[Verifying RemSets after GC]");
    VerifyRegionRemSetClosure v_cl;
    heap_region_iterate(&v_cl);
  }
  _verifier->verify_after_gc(type);
  _verifier->check_bitmaps("GC End");
  verify_numa_regions("GC End");
}
2931
// Resize the heap at the end of a young collection: first try to expand; only
// when no expansion was attempted, consider shrinking. Records the elapsed
// time in the phase times.
void G1CollectedHeap::resize_heap_after_young_collection() {
  Ticks start = Ticks::now();
  if (!expand_heap_after_young_collection()) {
    // If we don't attempt to expand heap, try if we need to shrink the heap
    shrink_heap_after_young_collection();
  }
  // Convert seconds to milliseconds for the phase-time record.
  phase_times()->record_resize_heap_time((Ticks::now() - start).seconds() * 1000.0);
}
2940
2941 bool G1CollectedHeap::expand_heap_after_young_collection(){
2942 size_t expand_bytes = _heap_sizing_policy->expansion_amount_after_young_collection();
2943 if (expand_bytes > 0) {
2944 if (expand(expand_bytes, _workers, NULL)) {
2945 // We failed to expand the heap. Cannot do anything about it.
2946 }
2947 return true;
2948 }
2949 return false;
2950 }
2951
// Possibly shrink the heap after a young collection. Only runs at the end of
// the mixed GC phase (i.e. not in the young-only phase and not when another
// mixed GC is still expected); shrinks by whatever the sizing policy asks.
void G1CollectedHeap::shrink_heap_after_young_collection() {
  if (collector_state()->in_young_only_phase() || policy()->next_gc_should_be_mixed()) {
    // Do the shrink during gc only at the end of mixed gc phase
    return;
  }
  size_t shrink_bytes = _heap_sizing_policy->shrink_amount_at_last_mixed_gc(policy()->desired_bytes_after_concurrent_mark());
  if (shrink_bytes > 0) {
    shrink(shrink_bytes);
  }
}
2962
2963 const char* G1CollectedHeap::young_gc_name() const {
2964 if (collector_state()->in_initial_mark_gc()) {
2965 return "Pause Young (Concurrent Start)";
2966 } else if (collector_state()->in_young_only_phase()) {
2967 if (collector_state()->in_young_gc_before_mixed()) {
2968 return "Pause Young (Prepare Mixed)";
2969 } else {
2970 return "Pause Young (Normal)";
2971 }
2972 } else {
2973 return "Pause Young (Mixed)";
2974 }
2975 }
2976
2977 bool G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
2978 assert_at_safepoint_on_vm_thread();
2979 guarantee(!is_gc_active(), "collection is not reentrant");
3108
3109 start_new_collection_set();
3110
3111 _survivor_evac_stats.adjust_desired_plab_sz();
3112 _old_evac_stats.adjust_desired_plab_sz();
3113
3114 if (should_start_conc_mark) {
3115 // We have to do this before we notify the CM threads that
3116 // they can start working to make sure that all the
3117 // appropriate initialization is done on the CM object.
3118 concurrent_mark()->post_initial_mark();
3119 // Note that we don't actually trigger the CM thread at
3120 // this point. We do that later when we're sure that
3121 // the current thread has completed its logging output.
3122 }
3123
3124 allocate_dummy_regions();
3125
3126 _allocator->init_mutator_alloc_regions();
3127
3128 resize_heap_after_young_collection();
3129
3130 double sample_end_time_sec = os::elapsedTime();
3131 double pause_time_ms = (sample_end_time_sec - sample_start_time_sec) * MILLIUNITS;
3132 policy()->record_collection_pause_end(pause_time_ms);
3133 }
3134
3135 verify_after_young_collection(verify_type);
3136
3137 gc_epilogue(false);
3138 }
3139
3140 // Print the remainder of the GC log output.
3141 if (evacuation_failed()) {
3142 log_info(gc)("To-space exhausted");
3143 }
3144
3145 policy()->print_phases();
3146 heap_transition.print();
3147
3148 _hrm->verify_optional();
|