1043
1044 void G1CollectedHeap::verify_before_full_collection(bool explicit_gc) {
1045 assert(!GCCause::is_user_requested_gc(gc_cause()) || explicit_gc, "invariant");
1046 assert_used_and_recalculate_used_equal(this);
1047 _verifier->verify_region_sets_optional();
1048 _verifier->verify_before_gc(G1HeapVerifier::G1VerifyFull);
1049 _verifier->check_bitmaps("Full GC Start");
1050 }
1051
1052 void G1CollectedHeap::prepare_heap_for_mutators() {
1053 hrm()->prepare_for_full_collection_end();
1054
1055 // Delete metaspaces for unloaded class loaders and clean up loader_data graph
1056 ClassLoaderDataGraph::purge();
1057 MetaspaceUtils::verify_metrics();
1058
1059 // Prepare heap for normal collections.
1060 assert(num_free_regions() == 0, "we should not have added any free regions");
1061 rebuild_region_sets(false /* free_list_only */);
1062 abort_refinement();
1063 resize_heap_if_necessary();
1064
1065 // Rebuild the strong code root lists for each region
1066 rebuild_strong_code_roots();
1067
1068 // Purge code root memory
1069 purge_code_root_memory();
1070
1071 // Start a new incremental collection set for the next pause
1072 start_new_collection_set();
1073
1074 _allocator->init_mutator_alloc_regions();
1075
1076 // Post collection state updates.
1077 MetaspaceGC::compute_new_size();
1078 }
1079
1080 void G1CollectedHeap::abort_refinement() {
1081 if (_hot_card_cache->use_cache()) {
1082 _hot_card_cache->reset_hot_cache();
1083 }
1150
1151 G1FullCollector collector(this, explicit_gc, do_clear_all_soft_refs);
1152 GCTraceTime(Info, gc) tm("Pause Full", NULL, gc_cause(), true);
1153
1154 collector.prepare_collection();
1155 collector.collect();
1156 collector.complete_collection();
1157
1158 // Full collection was successfully completed.
1159 return true;
1160 }
1161
1162 void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) {
1163 // Currently, there is no facility in the do_full_collection(bool) API to notify
1164 // the caller that the collection did not succeed (e.g., because it was locked
1165 // out by the GC locker). So, right now, we'll ignore the return value.
1166 bool dummy = do_full_collection(true, /* explicit_gc */
1167 clear_all_soft_refs);
1168 }
1169
1170 void G1CollectedHeap::resize_heap_if_necessary() {
1171 assert_at_safepoint_on_vm_thread();
1172
1173 // Capacity, free and used after the GC are counted as full regions so
1174 // that the waste is included in the following calculations.
1175 const size_t capacity_after_gc = capacity();
1176 const size_t used_after_gc = capacity_after_gc - unused_committed_regions_in_bytes();
1177
1178 // This is enforced in arguments.cpp.
1179 assert(MinHeapFreeRatio <= MaxHeapFreeRatio,
1180 "otherwise the code below doesn't make sense");
1181
1182 // We don't have floating point command-line arguments
1183 const double minimum_free_percentage = (double) MinHeapFreeRatio / 100.0;
1184 const double maximum_used_percentage = 1.0 - minimum_free_percentage;
1185 const double maximum_free_percentage = (double) MaxHeapFreeRatio / 100.0;
1186 const double minimum_used_percentage = 1.0 - maximum_free_percentage;
1187
1188 // We have to be careful here as these two calculations can overflow
1189 // 32-bit size_t's.
1190 double used_after_gc_d = (double) used_after_gc;
1191 double minimum_desired_capacity_d = used_after_gc_d / maximum_used_percentage;
1192 double maximum_desired_capacity_d = used_after_gc_d / minimum_used_percentage;
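// Illustrative arithmetic (values assumed, not from the source): with
// used_after_gc = 2 GB, MinHeapFreeRatio = 40 and MaxHeapFreeRatio = 70,
// maximum_used_percentage = 0.60 and minimum_used_percentage = 0.30, so
//   minimum_desired_capacity_d = 2 GB / 0.60 ~= 3.33 GB
//   maximum_desired_capacity_d = 2 GB / 0.30 ~= 6.67 GB
// i.e. the capacity range that keeps free space between the two ratios.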
1193
1194 // Let's make sure that they are both under the max heap size, which
1195 // by default will make them fit into a size_t.
1196 double desired_capacity_upper_bound = (double) MaxHeapSize;
1197 minimum_desired_capacity_d = MIN2(minimum_desired_capacity_d,
1198 desired_capacity_upper_bound);
1199 maximum_desired_capacity_d = MIN2(maximum_desired_capacity_d,
1200 desired_capacity_upper_bound);
1201
1202 // We can now safely turn them into size_t's.
1203 size_t minimum_desired_capacity = (size_t) minimum_desired_capacity_d;
1204 size_t maximum_desired_capacity = (size_t) maximum_desired_capacity_d;
1205
1206 // This assert only makes sense here, before we adjust them
1207 // with respect to the min and max heap size.
1208 assert(minimum_desired_capacity <= maximum_desired_capacity,
1209 "minimum_desired_capacity = " SIZE_FORMAT ", "
1210 "maximum_desired_capacity = " SIZE_FORMAT,
1211 minimum_desired_capacity, maximum_desired_capacity);
1212
1213 // Should not be greater than the heap max size. No need to adjust
1214 // it with respect to the heap min size as it's a lower bound (i.e.,
1215 // we'll try to make the capacity larger than it, not smaller).
1216 minimum_desired_capacity = MIN2(minimum_desired_capacity, MaxHeapSize);
1217 // Should not be less than the heap min size. No need to adjust it
1218 // with respect to the heap max size as it's an upper bound (i.e.,
1219 // we'll try to make the capacity smaller than it, not greater).
1220 maximum_desired_capacity = MAX2(maximum_desired_capacity, MinHeapSize);
1221
1222 if (capacity_after_gc < minimum_desired_capacity) {
1223 // Don't expand unless it's significant
1224 size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;
2409 }
2410
2411 // For G1, TLABs should not contain humongous objects, so the maximum TLAB size
2412 // must be equal to the humongous object limit.
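// (In G1 the humongous threshold is half a heap region, HeapRegion::GrainWords / 2,
// and only objects strictly larger than it are treated as humongous, so a TLAB
// of exactly max_tlab_size() is still a regular, non-humongous allocation.)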
2413 size_t G1CollectedHeap::max_tlab_size() const {
2414 return align_down(_humongous_object_threshold_in_words, MinObjAlignment);
2415 }
2416
2417 size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
2418 return _allocator->unsafe_max_tlab_alloc();
2419 }
2420
2421 size_t G1CollectedHeap::max_capacity() const {
2422 return _hrm->max_expandable_length() * HeapRegion::GrainBytes;
2423 }
2424
2425 size_t G1CollectedHeap::max_reserved_capacity() const {
2426 return _hrm->max_length() * HeapRegion::GrainBytes;
2427 }
2428
2429 jlong G1CollectedHeap::millis_since_last_gc() {
2430 // See the notes in GenCollectedHeap::millis_since_last_gc()
2431 // for more information about the implementation.
2432 jlong ret_val = (os::javaTimeNanos() / NANOSECS_PER_MILLISEC) -
2433 _policy->collection_pause_end_millis();
2434 if (ret_val < 0) {
2435 log_warning(gc)("millis_since_last_gc() would return: " JLONG_FORMAT
2436 ". Returning zero instead.", ret_val);
2437 return 0;
2438 }
2439 return ret_val;
2440 }
2441
2442 void G1CollectedHeap::deduplicate_string(oop str) {
2443 assert(java_lang_String::is_instance(str), "invariant");
2444
2445 if (G1StringDedup::is_enabled()) {
2446 G1StringDedup::deduplicate(str);
2447 }
2448 }
2935 log_info(gc, verify)("[Verifying RemSets before GC]");
2936 VerifyRegionRemSetClosure v_cl;
2937 heap_region_iterate(&v_cl);
2938 }
2939 _verifier->verify_before_gc(type);
2940 _verifier->check_bitmaps("GC Start");
2941 verify_numa_regions("GC Start");
2942 }
2943
2944 void G1CollectedHeap::verify_after_young_collection(G1HeapVerifier::G1VerifyType type) {
2945 if (VerifyRememberedSets) {
2946 log_info(gc, verify)("[Verifying RemSets after GC]");
2947 VerifyRegionRemSetClosure v_cl;
2948 heap_region_iterate(&v_cl);
2949 }
2950 _verifier->verify_after_gc(type);
2951 _verifier->check_bitmaps("GC End");
2952 verify_numa_regions("GC End");
2953 }
2954
2955 void G1CollectedHeap::expand_heap_after_young_collection() {
2956 size_t expand_bytes = _heap_sizing_policy->expansion_amount();
2957 if (expand_bytes > 0) {
2958 // No need for ergo logging here;
2959 // expansion_amount() does this when it returns a value > 0.
2960 double expand_ms;
2961 if (!expand(expand_bytes, _workers, &expand_ms)) {
2962 // We failed to expand the heap. Cannot do anything about it.
2963 }
2964 phase_times()->record_expand_heap_time(expand_ms);
2965 }
2966 }
2967
2968 const char* G1CollectedHeap::young_gc_name() const {
2969 if (collector_state()->in_initial_mark_gc()) {
2970 return "Pause Young (Concurrent Start)";
2971 } else if (collector_state()->in_young_only_phase()) {
2972 if (collector_state()->in_young_gc_before_mixed()) {
2973 return "Pause Young (Prepare Mixed)";
2974 } else {
2975 return "Pause Young (Normal)";
2976 }
2977 } else {
2978 return "Pause Young (Mixed)";
2979 }
2980 }
2981
2982 bool G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
2983 assert_at_safepoint_on_vm_thread();
2984 guarantee(!is_gc_active(), "collection is not reentrant");
3100
3101 start_new_collection_set();
3102
3103 _survivor_evac_stats.adjust_desired_plab_sz();
3104 _old_evac_stats.adjust_desired_plab_sz();
3105
3106 if (should_start_conc_mark) {
3107 // We have to do this before we notify the CM threads that
3108 // they can start working to make sure that all the
3109 // appropriate initialization is done on the CM object.
3110 concurrent_mark()->post_initial_mark();
3111 // Note that we don't actually trigger the CM thread at
3112 // this point. We do that later when we're sure that
3113 // the current thread has completed its logging output.
3114 }
3115
3116 allocate_dummy_regions();
3117
3118 _allocator->init_mutator_alloc_regions();
3119
3120 expand_heap_after_young_collection();
3121
3122 double sample_end_time_sec = os::elapsedTime();
3123 double pause_time_ms = (sample_end_time_sec - sample_start_time_sec) * MILLIUNITS;
3124 policy()->record_collection_pause_end(pause_time_ms);
3125 }
3126
3127 verify_after_young_collection(verify_type);
3128
3129 #ifdef TRACESPINNING
3130 ParallelTaskTerminator::print_termination_counts();
3131 #endif
3132
3133 gc_epilogue(false);
3134 }
3135
3136 // Print the remainder of the GC log output.
3137 if (evacuation_failed()) {
3138 log_info(gc)("To-space exhausted");
3139 }
3140
1043
1044 void G1CollectedHeap::verify_before_full_collection(bool explicit_gc) {
1045 assert(!GCCause::is_user_requested_gc(gc_cause()) || explicit_gc, "invariant");
1046 assert_used_and_recalculate_used_equal(this);
1047 _verifier->verify_region_sets_optional();
1048 _verifier->verify_before_gc(G1HeapVerifier::G1VerifyFull);
1049 _verifier->check_bitmaps("Full GC Start");
1050 }
1051
1052 void G1CollectedHeap::prepare_heap_for_mutators() {
1053 hrm()->prepare_for_full_collection_end();
1054
1055 // Delete metaspaces for unloaded class loaders and clean up loader_data graph
1056 ClassLoaderDataGraph::purge();
1057 MetaspaceUtils::verify_metrics();
1058
1059 // Prepare heap for normal collections.
1060 assert(num_free_regions() == 0, "we should not have added any free regions");
1061 rebuild_region_sets(false /* free_list_only */);
1062 abort_refinement();
1063
1064 resize_heap_after_full_gc();
1065
1066 // Rebuild the strong code root lists for each region
1067 rebuild_strong_code_roots();
1068
1069 // Purge code root memory
1070 purge_code_root_memory();
1071
1072 // Start a new incremental collection set for the next pause
1073 start_new_collection_set();
1074
1075 _allocator->init_mutator_alloc_regions();
1076
1077 // Post collection state updates.
1078 MetaspaceGC::compute_new_size();
1079 }
1080
1081 void G1CollectedHeap::abort_refinement() {
1082 if (_hot_card_cache->use_cache()) {
1083 _hot_card_cache->reset_hot_cache();
1084 }
1151
1152 G1FullCollector collector(this, explicit_gc, do_clear_all_soft_refs);
1153 GCTraceTime(Info, gc) tm("Pause Full", NULL, gc_cause(), true);
1154
1155 collector.prepare_collection();
1156 collector.collect();
1157 collector.complete_collection();
1158
1159 // Full collection was successfully completed.
1160 return true;
1161 }
1162
1163 void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) {
1164 // Currently, there is no facility in the do_full_collection(bool) API to notify
1165 // the caller that the collection did not succeed (e.g., because it was locked
1166 // out by the GC locker). So, right now, we'll ignore the return value.
1167 bool dummy = do_full_collection(true, /* explicit_gc */
1168 clear_all_soft_refs);
1169 }
1170
1171 void G1CollectedHeap::resize_heap_after_full_gc() {
1172 assert_at_safepoint_on_vm_thread();
1173 assert(collector_state()->in_full_gc(), "Must be");
1174
1175 // Capacity, free and used after the GC are counted as full regions so
1176 // that the waste is included in the following calculations.
1177 const size_t capacity_after_gc = capacity();
1178 const size_t used_after_gc = capacity_after_gc - unused_committed_regions_in_bytes();
1179
1180 size_t minimum_desired_capacity = _heap_sizing_policy->target_heap_capacity(used_after_gc, MinHeapFreeRatio);
1181 size_t maximum_desired_capacity = _heap_sizing_policy->target_heap_capacity(used_after_gc, MaxHeapFreeRatio);
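// Assumption (target_heap_capacity is not shown in this diff): it presumably
// computes used / (1.0 - free_ratio / 100.0), the smallest capacity at which
// 'used' bytes still leave free_ratio percent of the heap free. That matches
// the explicit ratio arithmetic in the old version this replaces.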
1182
1183 // This assert only makes sense here, before we adjust them
1184 // with respect to the min and max heap size.
1185 assert(minimum_desired_capacity <= maximum_desired_capacity,
1186 "minimum_desired_capacity = " SIZE_FORMAT ", "
1187 "maximum_desired_capacity = " SIZE_FORMAT,
1188 minimum_desired_capacity, maximum_desired_capacity);
1189
1190 // Should not be greater than the heap max size. No need to adjust
1191 // it with respect to the heap min size as it's a lower bound (i.e.,
1192 // we'll try to make the capacity larger than it, not smaller).
1193 minimum_desired_capacity = MIN2(minimum_desired_capacity, MaxHeapSize);
1194 // Should not be less than the heap min size. No need to adjust it
1195 // with respect to the heap max size as it's an upper bound (i.e.,
1196 // we'll try to make the capacity smaller than it, not greater).
1197 maximum_desired_capacity = MAX2(maximum_desired_capacity, MinHeapSize);
1198
1199 if (capacity_after_gc < minimum_desired_capacity) {
1200 // Don't expand unless it's significant
1201 size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;
2386 }
2387
2388 // For G1, TLABs should not contain humongous objects, so the maximum TLAB size
2389 // must be equal to the humongous object limit.
2390 size_t G1CollectedHeap::max_tlab_size() const {
2391 return align_down(_humongous_object_threshold_in_words, MinObjAlignment);
2392 }
2393
2394 size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
2395 return _allocator->unsafe_max_tlab_alloc();
2396 }
2397
2398 size_t G1CollectedHeap::max_capacity() const {
2399 return _hrm->max_expandable_length() * HeapRegion::GrainBytes;
2400 }
2401
2402 size_t G1CollectedHeap::max_reserved_capacity() const {
2403 return _hrm->max_length() * HeapRegion::GrainBytes;
2404 }
2405
2406 size_t G1CollectedHeap::soft_max_capacity() const {
2407 return clamp(align_up(SoftMaxHeapSize, HeapAlignment), MinHeapSize, max_capacity());
2408 }
2409
2410 jlong G1CollectedHeap::millis_since_last_gc() {
2411 // See the notes in GenCollectedHeap::millis_since_last_gc()
2412 // for more information about the implementation.
2413 jlong ret_val = (os::javaTimeNanos() / NANOSECS_PER_MILLISEC) -
2414 _policy->collection_pause_end_millis();
2415 if (ret_val < 0) {
2416 log_warning(gc)("millis_since_last_gc() would return: " JLONG_FORMAT
2417 ". Returning zero instead.", ret_val);
2418 return 0;
2419 }
2420 return ret_val;
2421 }
2422
2423 void G1CollectedHeap::deduplicate_string(oop str) {
2424 assert(java_lang_String::is_instance(str), "invariant");
2425
2426 if (G1StringDedup::is_enabled()) {
2427 G1StringDedup::deduplicate(str);
2428 }
2429 }
2916 log_info(gc, verify)("[Verifying RemSets before GC]");
2917 VerifyRegionRemSetClosure v_cl;
2918 heap_region_iterate(&v_cl);
2919 }
2920 _verifier->verify_before_gc(type);
2921 _verifier->check_bitmaps("GC Start");
2922 verify_numa_regions("GC Start");
2923 }
2924
2925 void G1CollectedHeap::verify_after_young_collection(G1HeapVerifier::G1VerifyType type) {
2926 if (VerifyRememberedSets) {
2927 log_info(gc, verify)("[Verifying RemSets after GC]");
2928 VerifyRegionRemSetClosure v_cl;
2929 heap_region_iterate(&v_cl);
2930 }
2931 _verifier->verify_after_gc(type);
2932 _verifier->check_bitmaps("GC End");
2933 verify_numa_regions("GC End");
2934 }
2935
2936 void G1CollectedHeap::resize_heap_after_young_collection() {
2937 Ticks start = Ticks::now();
2938 if (!expand_heap_after_young_collection()) {
2939 // If we did not attempt to expand the heap, check whether we need to shrink it.
2940 shrink_heap_after_young_collection();
2941 }
2942 phase_times()->record_resize_heap_time((Ticks::now() - start).seconds() * 1000.0);
2943 }
2944
2945 bool G1CollectedHeap::expand_heap_after_young_collection() {
2946 size_t expand_bytes = _heap_sizing_policy->expansion_amount_after_young_collection();
2947 if (expand_bytes > 0) {
2948 if (!expand(expand_bytes, _workers, NULL)) {
2949 // We failed to expand the heap. Cannot do anything about it.
2950 }
2951 return true;
2952 }
2953 return false;
2954 }
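// Note: the function above returns true whenever an expansion was attempted,
// even if the expansion itself failed, so resize_heap_after_young_collection()
// only considers shrinking when no expansion was requested at all.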
2955
2956 void G1CollectedHeap::shrink_heap_after_young_collection() {
2957 if (collector_state()->in_young_only_phase() || policy()->next_gc_should_be_mixed()) {
2958 // Only shrink during GC at the end of the mixed GC phase, i.e. after the last mixed collection.
2959 return;
2960 }
2961 size_t shrink_bytes = _heap_sizing_policy->shrink_amount_at_last_mixed_gc(policy()->desired_bytes_after_concurrent_mark());
2962 if (shrink_bytes > 0) {
2963 shrink(shrink_bytes);
2964 }
2965 }
2966
2967 void G1CollectedHeap::shrink_heap_after_concurrent_mark() {
2968 size_t shrink_bytes = _heap_sizing_policy->shrink_amount_after_concurrent_mark();
2969 if (shrink_bytes > 0) {
2970 shrink(shrink_bytes);
2971 }
2972 }
2973
2974 const char* G1CollectedHeap::young_gc_name() const {
2975 if (collector_state()->in_initial_mark_gc()) {
2976 return "Pause Young (Concurrent Start)";
2977 } else if (collector_state()->in_young_only_phase()) {
2978 if (collector_state()->in_young_gc_before_mixed()) {
2979 return "Pause Young (Prepare Mixed)";
2980 } else {
2981 return "Pause Young (Normal)";
2982 }
2983 } else {
2984 return "Pause Young (Mixed)";
2985 }
2986 }
2987
2988 bool G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
2989 assert_at_safepoint_on_vm_thread();
2990 guarantee(!is_gc_active(), "collection is not reentrant");
3106
3107 start_new_collection_set();
3108
3109 _survivor_evac_stats.adjust_desired_plab_sz();
3110 _old_evac_stats.adjust_desired_plab_sz();
3111
3112 if (should_start_conc_mark) {
3113 // We have to do this before we notify the CM threads that
3114 // they can start working to make sure that all the
3115 // appropriate initialization is done on the CM object.
3116 concurrent_mark()->post_initial_mark();
3117 // Note that we don't actually trigger the CM thread at
3118 // this point. We do that later when we're sure that
3119 // the current thread has completed its logging output.
3120 }
3121
3122 allocate_dummy_regions();
3123
3124 _allocator->init_mutator_alloc_regions();
3125
3126 resize_heap_after_young_collection();
3127
3128 double sample_end_time_sec = os::elapsedTime();
3129 double pause_time_ms = (sample_end_time_sec - sample_start_time_sec) * MILLIUNITS;
3130 policy()->record_collection_pause_end(pause_time_ms);
3131 }
3132
3133 verify_after_young_collection(verify_type);
3134
3135 #ifdef TRACESPINNING
3136 ParallelTaskTerminator::print_termination_counts();
3137 #endif
3138
3139 gc_epilogue(false);
3140 }
3141
3142 // Print the remainder of the GC log output.
3143 if (evacuation_failed()) {
3144 log_info(gc)("To-space exhausted");
3145 }
3146