// ... (start of this function is elided in the excerpt)
  tear_down_region_sets(false /* free_list_only */);
}

void G1CollectedHeap::verify_before_full_collection(bool explicit_gc) {
  assert(!GCCause::is_user_requested_gc(gc_cause()) || explicit_gc, "invariant");
  assert(used() == recalculate_used(), "Should be equal");
  _verifier->verify_region_sets_optional();
  _verifier->verify_before_gc(G1HeapVerifier::G1VerifyFull);
  _verifier->check_bitmaps("Full GC Start");
}

void G1CollectedHeap::prepare_heap_for_mutators() {
  // Delete metaspaces for unloaded class loaders and clean up loader_data graph
  ClassLoaderDataGraph::purge();
  MetaspaceUtils::verify_metrics();

  // Prepare heap for normal collections.
  assert(num_free_regions() == 0, "we should not have added any free regions");
  rebuild_region_sets(false /* free_list_only */);
  abort_refinement();
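  // Note: if the resize below decides to shrink, it can only uncommit
  // regions that the rebuild_region_sets() call above has just returned to
  // the free list (see shrink() / shrink_helper() further down).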
  resize_heap_if_necessary();

  // Rebuild the strong code root lists for each region
  rebuild_strong_code_roots();

  // Start a new incremental collection set for the next pause
  start_new_collection_set();

  _allocator->init_mutator_alloc_region();

  // Post collection state updates.
  MetaspaceGC::compute_new_size();
}

void G1CollectedHeap::abort_refinement() {
  if (_hot_card_cache->use_cache()) {
    _hot_card_cache->reset_hot_cache();
  }

  // Discard all remembered set updates.
  G1BarrierSet::dirty_card_queue_set().abandon_logs();
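  // Note: after a full GC every region's remembered set is expected to be
  // empty (see the asserts in RebuildRegionSetsClosure below), so the
  // buffered card updates abandoned here refer to stale state rather than
  // to work that still needs to be done.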
// ... (elided; the lines below appear to be the tail of the two-argument
// do_full_collection(), which the wrapper further down calls)

  G1FullCollector collector(this, explicit_gc, do_clear_all_soft_refs);
  GCTraceTime(Info, gc) tm("Pause Full", NULL, gc_cause(), true);

  collector.prepare_collection();
  collector.collect();
  collector.complete_collection();

  // Full collection was successfully completed.
  return true;
}

void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) {
  // Currently, there is no facility in the do_full_collection(bool) API to notify
  // the caller that the collection did not succeed (e.g., because it was locked
  // out by the GC locker). So, right now, we'll ignore the return value.
  bool dummy = do_full_collection(true /* explicit_gc */,
                                  clear_all_soft_refs);
}

void G1CollectedHeap::resize_heap_if_necessary() {
  // Capacity, free and used after the GC are counted in whole regions so that
  // the calculations below include the per-region waste.
  const size_t capacity_after_gc = capacity();
  const size_t used_after_gc = capacity_after_gc - unused_committed_regions_in_bytes();

  // This is enforced in arguments.cpp.
  assert(MinHeapFreeRatio <= MaxHeapFreeRatio,
         "otherwise the code below doesn't make sense");

  // We don't have floating point command-line arguments
  const double minimum_free_percentage = (double) MinHeapFreeRatio / 100.0;
  const double maximum_used_percentage = 1.0 - minimum_free_percentage;
  const double maximum_free_percentage = (double) MaxHeapFreeRatio / 100.0;
  const double minimum_used_percentage = 1.0 - maximum_free_percentage;
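  // For example (values for illustration only): MinHeapFreeRatio = 40 and
  // MaxHeapFreeRatio = 70 give maximum_used_percentage = 0.60 and
  // minimum_used_percentage = 0.30, i.e. the heap counts as too small when
  // less than 40% of committed space is free and as too large when more
  // than 70% is free.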

  const size_t min_heap_size = collector_policy()->min_heap_byte_size();
  const size_t max_heap_size = collector_policy()->max_heap_byte_size();

  // We have to be careful here as these two calculations can overflow
  // 32-bit size_t's.
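  // (This is also why the ratios above are computed as doubles: deriving the
  // desired capacities from used_after_gc in floating point and converting
  // back to size_t sidesteps the overflow that a scaled integer computation
  // multiplying used_after_gc by 100 could hit. The computation itself is
  // elided from this excerpt.)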
  // ... (elided: the computation of minimum_desired_capacity and
  // maximum_desired_capacity)

  // This assert only makes sense here, before we adjust them
  // with respect to the min and max heap size.
  assert(minimum_desired_capacity <= maximum_desired_capacity,
         "minimum_desired_capacity = " SIZE_FORMAT ", "
         "maximum_desired_capacity = " SIZE_FORMAT,
         minimum_desired_capacity, maximum_desired_capacity);

  // Should not be greater than the heap max size. No need to adjust
  // it with respect to the heap min size as it's a lower bound (i.e.,
  // we'll try to make the capacity larger than it, not smaller).
  minimum_desired_capacity = MIN2(minimum_desired_capacity, max_heap_size);
  // Should not be less than the heap min size. No need to adjust it
  // with respect to the heap max size as it's an upper bound (i.e.,
  // we'll try to make the capacity smaller than it, not greater).
  maximum_desired_capacity = MAX2(maximum_desired_capacity, min_heap_size);

  if (capacity_after_gc < minimum_desired_capacity) {
    // Don't expand unless it's significant
    size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;

    log_debug(gc, ergo, heap)("Attempt heap expansion (capacity lower than min desired capacity). "
                              "Capacity: " SIZE_FORMAT "B occupancy: " SIZE_FORMAT "B live: " SIZE_FORMAT "B "
                              "min_desired_capacity: " SIZE_FORMAT "B (" UINTX_FORMAT " %%)",
                              capacity_after_gc, used_after_gc, used(), minimum_desired_capacity, MinHeapFreeRatio);

    expand(expand_bytes, _workers);

    // No expansion, now see if we want to shrink
  } else if (capacity_after_gc > maximum_desired_capacity) {
    // Capacity too large, compute shrinking size
    size_t shrink_bytes = capacity_after_gc - maximum_desired_capacity;

    log_debug(gc, ergo, heap)("Attempt heap shrinking (capacity higher than max desired capacity). "
                              "Capacity: " SIZE_FORMAT "B occupancy: " SIZE_FORMAT "B live: " SIZE_FORMAT "B "
                              "maximum_desired_capacity: " SIZE_FORMAT "B (" UINTX_FORMAT " %%)",
                              capacity_after_gc, used_after_gc, used(), maximum_desired_capacity, MaxHeapFreeRatio);

    shrink(shrink_bytes);
  }
}

HeapWord* G1CollectedHeap::satisfy_failed_allocation_helper(size_t word_size,
                                                            bool do_gc,
                                                            bool clear_all_soft_refs,
                                                            bool expect_null_mutator_alloc_region,
                                                            bool* gc_succeeded) {
  *gc_succeeded = true;
  // Let's attempt the allocation first.
  HeapWord* result =
    attempt_allocation_at_safepoint(word_size,
                                    expect_null_mutator_alloc_region);
  if (result != NULL) {
    return result;
// ... (elided; the lines below appear to be from the body of shrink_helper(),
// whose beginning is not in the excerpt)

  aligned_shrink_bytes = align_down(aligned_shrink_bytes,
                                    HeapRegion::GrainBytes);
  uint num_regions_to_remove = (uint)(shrink_bytes / HeapRegion::GrainBytes);
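  // Illustration (assuming 4M regions, i.e. HeapRegion::GrainBytes == 4M):
  // a request to shrink by 10M is aligned down to 8M and yields
  // num_regions_to_remove == 2. shrink_by() below may still remove fewer
  // regions than requested, which is why the log reports the attempted
  // amount separately.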

  uint num_regions_removed = _hrm.shrink_by(num_regions_to_remove);
  size_t shrunk_bytes = num_regions_removed * HeapRegion::GrainBytes;

  log_debug(gc, ergo, heap)("Shrink the heap. requested shrinking amount: " SIZE_FORMAT "B aligned shrinking amount: " SIZE_FORMAT "B attempted shrinking amount: " SIZE_FORMAT "B",
                            shrink_bytes, aligned_shrink_bytes, shrunk_bytes);
  if (num_regions_removed > 0) {
    g1_policy()->record_new_heap_size(num_regions());
  } else {
    log_debug(gc, ergo, heap)("Did not shrink the heap (heap shrinking operation failed)");
  }
}

void G1CollectedHeap::shrink(size_t shrink_bytes) {
  _verifier->verify_region_sets_optional();

  // We should only reach here at the end of a Full GC or during Remark, which
  // means we should not be holding on to any GC alloc regions. The method
  // below will make sure of that and do any remaining clean up.
  _allocator->abandon_gc_alloc_regions();

  // Instead of tearing down / rebuilding the free lists here, we
  // could instead use the remove_all_pending() method on free_list to
  // remove only the ones that we need to remove.
  tear_down_region_sets(true /* free_list_only */);
  shrink_helper(shrink_bytes);
  rebuild_region_sets(true /* free_list_only */);

  _hrm.verify_optional();
  _verifier->verify_region_sets_optional();
}

class OldRegionSetChecker : public HeapRegionSetChecker {
public:
  void check_mt_safety() {
    // Master Old Set MT safety protocol:
    // (a) If we're at a safepoint, operations on the master old set
    // should be invoked:
    // ... (remainder of OldRegionSetChecker elided)

// ... (large elision; the closure below is used by rebuild_region_sets()
// further down. Its class header is not in the excerpt and is reconstructed
// here from the constructor and do_heap_region().)
class RebuildRegionSetsClosure : public HeapRegionClosure {
private:
  bool _free_list_only;

  HeapRegionSet* _old_set;
  HeapRegionManager* _hrm;

  size_t _total_used;

public:
  RebuildRegionSetsClosure(bool free_list_only,
                           HeapRegionSet* old_set,
                           HeapRegionManager* hrm) :
    _free_list_only(free_list_only),
    _old_set(old_set), _hrm(hrm), _total_used(0) {
    assert(_hrm->num_free_regions() == 0, "pre-condition");
    if (!free_list_only) {
      assert(_old_set->is_empty(), "pre-condition");
    }
  }

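  // Per-region step: empty regions always go back on the free list. In a
  // full rebuild (free_list_only == false), every remaining region except
  // archive and humongous ones is retagged as old and its usage is
  // accumulated into _total_used. Returning false continues the iteration
  // over all regions.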
  bool do_heap_region(HeapRegion* r) {
    if (r->is_empty()) {
      assert(r->rem_set()->is_empty(), "Remembered sets should be empty.");
      // Add free regions to the free list
      r->set_free();
      _hrm->insert_into_free_list(r);
    } else if (!_free_list_only) {
      assert(r->rem_set()->is_empty(), "Remembered sets should be empty.");

      if (r->is_archive() || r->is_humongous()) {
        // We ignore archive and humongous regions. We leave those sets unchanged.
      } else {
        assert(r->is_young() || r->is_free() || r->is_old(), "invariant");
        // We now move all (non-humongous, non-old, non-archive) regions to old gen,
        // and register them as such.
        r->move_to_old();
        _old_set->add(r);
      }
      _total_used += r->used();
    }

    return false;
  }

  size_t total_used() {
    return _total_used;
  }
};

void G1CollectedHeap::rebuild_region_sets(bool free_list_only) {
  assert_at_safepoint_on_vm_thread();

  if (!free_list_only) {
    _eden.clear();
    _survivor.clear();
  }

  RebuildRegionSetsClosure cl(free_list_only, &_old_set, &_hrm);
  heap_region_iterate(&cl);

  if (!free_list_only) {
    set_used(cl.total_used());
    if (_archive_allocator != NULL) {
      _archive_allocator->clear_used();
    }
  }
  assert(used() == recalculate_used(),
         "inconsistent used(), value: " SIZE_FORMAT " recalculated: " SIZE_FORMAT,
         used(), recalculate_used());
}
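// In this file, rebuild_region_sets() is called with free_list_only == false
// from prepare_heap_for_mutators() after a full GC, and with
// free_list_only == true from shrink().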

bool G1CollectedHeap::is_in_closed_subset(const void* p) const {
  HeapRegion* hr = heap_region_containing(p);
  return hr->is_in(p);
}

// Methods for the mutator alloc region

HeapRegion* G1CollectedHeap::new_mutator_alloc_region(size_t word_size,
                                                      bool force) {
  assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
  bool should_allocate = g1_policy()->should_allocate_mutator_region();
  if (force || should_allocate) {
    HeapRegion* new_alloc_region = new_region(word_size,
                                              false /* is_old */,
                                              false /* do_expand */);
    if (new_alloc_region != NULL) {
      set_region_short_lived_locked(new_alloc_region);
      _hr_printer.alloc(new_alloc_region, !should_allocate);
      // ... (remainder of new_mutator_alloc_region elided)