1162 void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) {
1163 // Currently, there is no facility in the do_full_collection(bool) API to notify
1164 // the caller that the collection did not succeed (e.g., because it was locked
1165 // out by the GC locker). So, right now, we'll ignore the return value.
1166 bool dummy = do_full_collection(true, /* explicit_gc */
1167 clear_all_soft_refs);
1168 }
1169
1170 void G1CollectedHeap::resize_heap_if_necessary() {
1171 assert_at_safepoint_on_vm_thread();
1172
1173 // Capacity, free and used after the GC counted as full regions to
1174 // include the waste in the following calculations.
1175 const size_t capacity_after_gc = capacity();
1176 const size_t used_after_gc = capacity_after_gc - unused_committed_regions_in_bytes();
1177
1178 // This is enforced in arguments.cpp.
1179 assert(MinHeapFreeRatio <= MaxHeapFreeRatio,
1180 "otherwise the code below doesn't make sense");
1181
// Translate the integer ratio flags into fractions: MinHeapFreeRatio bounds
// how full the heap may be (=> a minimum capacity), MaxHeapFreeRatio bounds
// how empty it may be (=> a maximum capacity).
1182 // We don't have floating point command-line arguments
1183 const double minimum_free_percentage = (double) MinHeapFreeRatio / 100.0;
1184 const double maximum_used_percentage = 1.0 - minimum_free_percentage;
1185 const double maximum_free_percentage = (double) MaxHeapFreeRatio / 100.0;
1186 const double minimum_used_percentage = 1.0 - maximum_free_percentage;
1187
1188 // We have to be careful here as these two calculations can overflow
1189 // 32-bit size_t's.
1190 double used_after_gc_d = (double) used_after_gc;
1191 double minimum_desired_capacity_d = used_after_gc_d / maximum_used_percentage;
1192 double maximum_desired_capacity_d = used_after_gc_d / minimum_used_percentage;
1193
1194 // Let's make sure that they are both under the max heap size, which
1195 // by default will make them fit into a size_t.
1196 double desired_capacity_upper_bound = (double) MaxHeapSize;
1197 minimum_desired_capacity_d = MIN2(minimum_desired_capacity_d,
1198 desired_capacity_upper_bound);
1199 maximum_desired_capacity_d = MIN2(maximum_desired_capacity_d,
1200 desired_capacity_upper_bound);
1201
1202 // We can now safely turn them into size_t's.
1203 size_t minimum_desired_capacity = (size_t) minimum_desired_capacity_d;
1204 size_t maximum_desired_capacity = (size_t) maximum_desired_capacity_d;
1205
1206 // This assert only makes sense here, before we adjust them
1207 // with respect to the min and max heap size.
1208 assert(minimum_desired_capacity <= maximum_desired_capacity,
1209 "minimum_desired_capacity = " SIZE_FORMAT ", "
1210 "maximum_desired_capacity = " SIZE_FORMAT,
1211 minimum_desired_capacity, maximum_desired_capacity);
1212
1213 // Should not be greater than the heap max size. No need to adjust
1214 // it with respect to the heap min size as it's a lower bound (i.e.,
1215 // we'll try to make the capacity larger than it, not smaller).
1216 minimum_desired_capacity = MIN2(minimum_desired_capacity, MaxHeapSize);
1217 // Should not be less than the heap min size. No need to adjust it
1218 // with respect to the heap max size as it's an upper bound (i.e.,
1219 // we'll try to make the capacity smaller than it, not greater).
1220 maximum_desired_capacity = MAX2(maximum_desired_capacity, MinHeapSize);
1221
1222 if (capacity_after_gc < minimum_desired_capacity) {
1223 // Don't expand unless it's significant
1224 size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;
// NOTE(review): the remainder of this function (the expand/shrink decision)
// is outside this chunk; reviewed only up to this point.
2409 }
2410
2411 // For G1 TLABs should not contain humongous objects, so the maximum TLAB size
2412 // must be equal to the humongous object limit.
2413 size_t G1CollectedHeap::max_tlab_size() const {
2414 return align_down(_humongous_object_threshold_in_words, MinObjAlignment);
2415 }
2416
2417 size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
2418 return _allocator->unsafe_max_tlab_alloc();
2419 }
2420
2421 size_t G1CollectedHeap::max_capacity() const {
2422 return _hrm->max_expandable_length() * HeapRegion::GrainBytes;
2423 }
2424
2425 size_t G1CollectedHeap::max_reserved_capacity() const {
2426 return _hrm->max_length() * HeapRegion::GrainBytes;
2427 }
2428
2429 jlong G1CollectedHeap::millis_since_last_gc() {
2430 // See the notes in GenCollectedHeap::millis_since_last_gc()
2431 // for more information about the implementation.
2432 jlong ret_val = (os::javaTimeNanos() / NANOSECS_PER_MILLISEC) -
2433 _policy->collection_pause_end_millis();
2434 if (ret_val < 0) {
2435 log_warning(gc)("millis_since_last_gc() would return : " JLONG_FORMAT
2436 ". returning zero instead.", ret_val);
2437 return 0;
2438 }
2439 return ret_val;
2440 }
2441
2442 void G1CollectedHeap::deduplicate_string(oop str) {
2443 assert(java_lang_String::is_instance(str), "invariant");
2444
2445 if (G1StringDedup::is_enabled()) {
2446 G1StringDedup::deduplicate(str);
2447 }
2448 }
2935 log_info(gc, verify)("[Verifying RemSets before GC]");
2936 VerifyRegionRemSetClosure v_cl;
2937 heap_region_iterate(&v_cl);
2938 }
2939 _verifier->verify_before_gc(type);
2940 _verifier->check_bitmaps("GC Start");
2941 verify_numa_regions("GC Start");
2942 }
2943
2944 void G1CollectedHeap::verify_after_young_collection(G1HeapVerifier::G1VerifyType type) {
2945 if (VerifyRememberedSets) {
2946 log_info(gc, verify)("[Verifying RemSets after GC]");
2947 VerifyRegionRemSetClosure v_cl;
2948 heap_region_iterate(&v_cl);
2949 }
2950 _verifier->verify_after_gc(type);
2951 _verifier->check_bitmaps("GC End");
2952 verify_numa_regions("GC End");
2953 }
2954
2955 void G1CollectedHeap::expand_heap_after_young_collection(){
2956 size_t expand_bytes = _heap_sizing_policy->expansion_amount();
2957 if (expand_bytes > 0) {
2958 // No need for an ergo logging here,
2959 // expansion_amount() does this when it returns a value > 0.
2960 double expand_ms;
2961 if (!expand(expand_bytes, _workers, &expand_ms)) {
2962 // We failed to expand the heap. Cannot do anything about it.
2963 }
2964 phase_times()->record_expand_heap_time(expand_ms);
2965 }
2966 }
2967
2968 const char* G1CollectedHeap::young_gc_name() const {
2969 if (collector_state()->in_initial_mark_gc()) {
2970 return "Pause Young (Concurrent Start)";
2971 } else if (collector_state()->in_young_only_phase()) {
2972 if (collector_state()->in_young_gc_before_mixed()) {
2973 return "Pause Young (Prepare Mixed)";
2974 } else {
2975 return "Pause Young (Normal)";
2976 }
2977 } else {
2978 return "Pause Young (Mixed)";
2979 }
2980 }
2981
2982 bool G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
2983 assert_at_safepoint_on_vm_thread();
2984 guarantee(!is_gc_active(), "collection is not reentrant");
// NOTE(review): original lines 2985-3099 are not visible in this chunk;
// only the prologue and epilogue of this function are reviewed here.
3100
3101 start_new_collection_set();
3102
// Resize the PLAB estimates using the statistics gathered this pause.
3103 _survivor_evac_stats.adjust_desired_plab_sz();
3104 _old_evac_stats.adjust_desired_plab_sz();
3105
3106 if (should_start_conc_mark) {
3107 // We have to do this before we notify the CM threads that
3108 // they can start working to make sure that all the
3109 // appropriate initialization is done on the CM object.
3110 concurrent_mark()->post_initial_mark();
3111 // Note that we don't actually trigger the CM thread at
3112 // this point. We do that later when we're sure that
3113 // the current thread has completed its logging output.
3114 }
3115
3116 allocate_dummy_regions();
3117
3118 _allocator->init_mutator_alloc_regions();
3119
3120 expand_heap_after_young_collection();
3121
// Record the total pause duration for the policy's pause-time model.
3122 double sample_end_time_sec = os::elapsedTime();
3123 double pause_time_ms = (sample_end_time_sec - sample_start_time_sec) * MILLIUNITS;
3124 policy()->record_collection_pause_end(pause_time_ms);
3125 }
3126
3127 verify_after_young_collection(verify_type);
3128
3129 #ifdef TRACESPINNING
3130 ParallelTaskTerminator::print_termination_counts();
3131 #endif
3132
3133 gc_epilogue(false);
3134 }
3135
3136 // Print the remainder of the GC log output.
3137 if (evacuation_failed()) {
3138 log_info(gc)("To-space exhausted");
3139 }
3140
3140
|
1162 void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) {
1163 // Currently, there is no facility in the do_full_collection(bool) API to notify
1164 // the caller that the collection did not succeed (e.g., because it was locked
1165 // out by the GC locker). So, right now, we'll ignore the return value.
1166 bool dummy = do_full_collection(true, /* explicit_gc */
1167 clear_all_soft_refs);
1168 }
1169
1170 void G1CollectedHeap::resize_heap_if_necessary() {
1171 assert_at_safepoint_on_vm_thread();
1172
1173 // Capacity, free and used after the GC counted as full regions to
1174 // include the waste in the following calculations.
1175 const size_t capacity_after_gc = capacity();
1176 const size_t used_after_gc = capacity_after_gc - unused_committed_regions_in_bytes();
1177
1178 // This is enforced in arguments.cpp.
1179 assert(MinHeapFreeRatio <= MaxHeapFreeRatio,
1180 "otherwise the code below doesn't make sense");
1181
1182 size_t minimum_desired_capacity = _heap_sizing_policy->target_heap_capacity(used_after_gc, MinHeapFreeRatio);
// NOTE(review): likely copy-paste bug — both bounds are computed from
// MinHeapFreeRatio, making minimum and maximum identical and defeating the
// assert below. The next line should presumably pass MaxHeapFreeRatio
// (cf. the earlier version of this function, which uses both ratios).
// Confirm against G1HeapSizingPolicy::target_heap_capacity().
1183 size_t maximum_desired_capacity = _heap_sizing_policy->target_heap_capacity(used_after_gc, MinHeapFreeRatio);
1184
1185 // This assert only makes sense here, before we adjust them
1186 // with respect to the min and max heap size.
1187 assert(minimum_desired_capacity <= maximum_desired_capacity,
1188 "minimum_desired_capacity = " SIZE_FORMAT ", "
1189 "maximum_desired_capacity = " SIZE_FORMAT,
1190 minimum_desired_capacity, maximum_desired_capacity);
1191
1192 // Should not be greater than the heap max size. No need to adjust
1193 // it with respect to the heap min size as it's a lower bound (i.e.,
1194 // we'll try to make the capacity larger than it, not smaller).
1195 minimum_desired_capacity = MIN2(minimum_desired_capacity, MaxHeapSize);
1196 // Should not be less than the heap min size. No need to adjust it
1197 // with respect to the heap max size as it's an upper bound (i.e.,
1198 // we'll try to make the capacity smaller than it, not greater).
1199 maximum_desired_capacity = MAX2(maximum_desired_capacity, MinHeapSize);
1200
1201 if (capacity_after_gc < minimum_desired_capacity) {
1202 // Don't expand unless it's significant
1203 size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;
// NOTE(review): the remainder of this function (the expand/shrink decision)
// is outside this chunk; reviewed only up to this point.
2388 }
2389
2390 // For G1 TLABs should not contain humongous objects, so the maximum TLAB size
2391 // must be equal to the humongous object limit.
2392 size_t G1CollectedHeap::max_tlab_size() const {
2393 return align_down(_humongous_object_threshold_in_words, MinObjAlignment);
2394 }
2395
2396 size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
2397 return _allocator->unsafe_max_tlab_alloc();
2398 }
2399
2400 size_t G1CollectedHeap::max_capacity() const {
2401 return _hrm->max_expandable_length() * HeapRegion::GrainBytes;
2402 }
2403
2404 size_t G1CollectedHeap::max_reserved_capacity() const {
2405 return _hrm->max_length() * HeapRegion::GrainBytes;
2406 }
2407
2408 size_t G1CollectedHeap::soft_max_capacity() const {
2409 return clamp(align_up(SoftMaxHeapSize, HeapAlignment), MinHeapSize, max_capacity());
2410 }
2411
2412 jlong G1CollectedHeap::millis_since_last_gc() {
2413 // See the notes in GenCollectedHeap::millis_since_last_gc()
2414 // for more information about the implementation.
2415 jlong ret_val = (os::javaTimeNanos() / NANOSECS_PER_MILLISEC) -
2416 _policy->collection_pause_end_millis();
2417 if (ret_val < 0) {
2418 log_warning(gc)("millis_since_last_gc() would return : " JLONG_FORMAT
2419 ". returning zero instead.", ret_val);
2420 return 0;
2421 }
2422 return ret_val;
2423 }
2424
2425 void G1CollectedHeap::deduplicate_string(oop str) {
2426 assert(java_lang_String::is_instance(str), "invariant");
2427
2428 if (G1StringDedup::is_enabled()) {
2429 G1StringDedup::deduplicate(str);
2430 }
2431 }
2918 log_info(gc, verify)("[Verifying RemSets before GC]");
2919 VerifyRegionRemSetClosure v_cl;
2920 heap_region_iterate(&v_cl);
2921 }
2922 _verifier->verify_before_gc(type);
2923 _verifier->check_bitmaps("GC Start");
2924 verify_numa_regions("GC Start");
2925 }
2926
2927 void G1CollectedHeap::verify_after_young_collection(G1HeapVerifier::G1VerifyType type) {
2928 if (VerifyRememberedSets) {
2929 log_info(gc, verify)("[Verifying RemSets after GC]");
2930 VerifyRegionRemSetClosure v_cl;
2931 heap_region_iterate(&v_cl);
2932 }
2933 _verifier->verify_after_gc(type);
2934 _verifier->check_bitmaps("GC End");
2935 verify_numa_regions("GC End");
2936 }
2937
2938 void G1CollectedHeap::resize_heap_after_young_collection() {
2939 Ticks start = Ticks::now();
2940 if (!expand_heap_after_young_collection()) {
2941 // If we don't attempt to expand heap, try if we need to shrink the heap
2942 shrink_heap_after_young_collection();
2943 }
2944 phase_times()->record_resize_heap_time((Ticks::now() - start).seconds() * 1000.0);
2945 }
2946
2947 bool G1CollectedHeap::expand_heap_after_young_collection(){
2948 size_t expand_bytes = _heap_sizing_policy->expansion_amount_after_young_collection();
2949 if (expand_bytes > 0) {
2950 if (expand(expand_bytes, _workers, NULL)) {
2951 // We failed to expand the heap. Cannot do anything about it.
2952 }
2953 return true;
2954 }
2955 return false;
2956 }
2957
2958 void G1CollectedHeap::shrink_heap_after_young_collection() {
2959 if (!collector_state()->finish_of_mixed_gc()) {
2960 // Do the shrink only after finish of mixed gc
2961 return;
2962 }
2963 size_t shrink_bytes = _heap_sizing_policy->shrink_amount_after_mixed_collections();
2964 if (shrink_bytes > 0) {
2965 shrink(shrink_bytes);
2966 }
2967 }
2968
2969 void G1CollectedHeap::expand_heap_after_concurrent_mark() {
2970 size_t expand_bytes = _heap_sizing_policy->expansion_amount_after_concurrent_mark();
2971 if (expand_bytes > 0) {
2972 expand(expand_bytes, _workers, NULL);
2973 }
2974 }
2975
2976 const char* G1CollectedHeap::young_gc_name() const {
2977 if (collector_state()->in_initial_mark_gc()) {
2978 return "Pause Young (Concurrent Start)";
2979 } else if (collector_state()->in_young_only_phase()) {
2980 if (collector_state()->in_young_gc_before_mixed()) {
2981 return "Pause Young (Prepare Mixed)";
2982 } else {
2983 return "Pause Young (Normal)";
2984 }
2985 } else {
2986 return "Pause Young (Mixed)";
2987 }
2988 }
2989
2990 bool G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
2991 assert_at_safepoint_on_vm_thread();
2992 guarantee(!is_gc_active(), "collection is not reentrant");
// NOTE(review): original lines 2993-3107 are not visible in this chunk;
// only the prologue and epilogue of this function are reviewed here.
3108
3109 start_new_collection_set();
3110
// Resize the PLAB estimates using the statistics gathered this pause.
3111 _survivor_evac_stats.adjust_desired_plab_sz();
3112 _old_evac_stats.adjust_desired_plab_sz();
3113
3114 if (should_start_conc_mark) {
3115 // We have to do this before we notify the CM threads that
3116 // they can start working to make sure that all the
3117 // appropriate initialization is done on the CM object.
3118 concurrent_mark()->post_initial_mark();
3119 // Note that we don't actually trigger the CM thread at
3120 // this point. We do that later when we're sure that
3121 // the current thread has completed its logging output.
3122 }
3123
3124 allocate_dummy_regions();
3125
3126 _allocator->init_mutator_alloc_regions();
3127
3128 resize_heap_after_young_collection();
3129
// Record the total pause duration for the policy's pause-time model.
3130 double sample_end_time_sec = os::elapsedTime();
3131 double pause_time_ms = (sample_end_time_sec - sample_start_time_sec) * MILLIUNITS;
3132 policy()->record_collection_pause_end(pause_time_ms);
3133 }
3134
3135 verify_after_young_collection(verify_type);
3136
3137 #ifdef TRACESPINNING
3138 ParallelTaskTerminator::print_termination_counts();
3139 #endif
3140
3141 gc_epilogue(false);
3142 }
3143
3144 // Print the remainder of the GC log output.
3145 if (evacuation_failed()) {
3146 log_info(gc)("To-space exhausted");
3147 }
3148
3148
|