  tear_down_region_sets(false /* free_list_only */);
}

void G1CollectedHeap::verify_before_full_collection(bool explicit_gc) {
  assert(!GCCause::is_user_requested_gc(gc_cause()) || explicit_gc, "invariant");
  assert(used() == recalculate_used(), "Should be equal");
  _verifier->verify_region_sets_optional();
  _verifier->verify_before_gc(G1HeapVerifier::G1VerifyFull);
  _verifier->check_bitmaps("Full GC Start");
}

void G1CollectedHeap::prepare_heap_for_mutators() {
  // Delete metaspaces for unloaded class loaders and clean up loader_data graph
  ClassLoaderDataGraph::purge();
  MetaspaceUtils::verify_metrics();

  // Prepare heap for normal collections.
  assert(num_free_regions() == 0, "we should not have added any free regions");
  rebuild_region_sets(false /* free_list_only */);
  abort_refinement();
  resize_heap_if_necessary();

  // Rebuild the strong code root lists for each region
  rebuild_strong_code_roots();

  // Purge code root memory
  purge_code_root_memory();

  // Start a new incremental collection set for the next pause
  start_new_collection_set();

  _allocator->init_mutator_alloc_region();

  // Post collection state updates.
  MetaspaceGC::compute_new_size();
}

void G1CollectedHeap::abort_refinement() {
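  // Refinement state is discarded rather than completed here: a full
  // collection makes any pending card refinement work stale.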
  if (_hot_card_cache->use_cache()) {
    _hot_card_cache->reset_hot_cache();
  }

// ... (code elided) ...

  G1FullCollector collector(this, explicit_gc, do_clear_all_soft_refs);
  GCTraceTime(Info, gc) tm("Pause Full", NULL, gc_cause(), true);

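  // Roughly, the collector splits the pause into three steps: pre-GC setup
  // and verification (prepare_collection), the mark-compact phases proper
  // (collect), and post-compaction cleanup that readies the heap for
  // mutators again (complete_collection).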
  collector.prepare_collection();
  collector.collect();
  collector.complete_collection();

  // Full collection was successfully completed.
  return true;
}

void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) {
  // Currently, there is no facility in the do_full_collection(bool) API to notify
  // the caller that the collection did not succeed (e.g., because it was locked
  // out by the GC locker). So, right now, we'll ignore the return value.
  bool dummy = do_full_collection(true, /* explicit_gc */
                                  clear_all_soft_refs);
}

void G1CollectedHeap::resize_heap_if_necessary() {
  // Capacity, free and used after the GC counted as full regions to
  // include the waste in the following calculations.
  const size_t capacity_after_gc = capacity();
  const size_t used_after_gc = capacity_after_gc - unused_committed_regions_in_bytes();

  // This is enforced in arguments.cpp.
  assert(MinHeapFreeRatio <= MaxHeapFreeRatio,
         "otherwise the code below doesn't make sense");

  // We don't have floating point command-line arguments
  const double minimum_free_percentage = (double) MinHeapFreeRatio / 100.0;
  const double maximum_used_percentage = 1.0 - minimum_free_percentage;
  const double maximum_free_percentage = (double) MaxHeapFreeRatio / 100.0;
  const double minimum_used_percentage = 1.0 - maximum_free_percentage;

  const size_t min_heap_size = collector_policy()->min_heap_byte_size();
  const size_t max_heap_size = collector_policy()->max_heap_byte_size();

  // We have to be careful here as these two calculations can overflow
  // 32-bit size_t's.

  // ... (code elided) ...
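  // Worked example (hypothetical numbers, assuming the elided code derives
  // each bound by dividing used_after_gc by the corresponding used
  // percentage): with used_after_gc = 600M and MinHeapFreeRatio = 40,
  // minimum_desired_capacity = 600M / 0.60 = 1000M; with
  // MaxHeapFreeRatio = 70, maximum_desired_capacity = 600M / 0.30 = 2000M.
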
  // This assert only makes sense here, before we adjust them
  // with respect to the min and max heap size.
  assert(minimum_desired_capacity <= maximum_desired_capacity,
         "minimum_desired_capacity = " SIZE_FORMAT ", "
         "maximum_desired_capacity = " SIZE_FORMAT,
         minimum_desired_capacity, maximum_desired_capacity);

  // Should not be greater than the heap max size. No need to adjust
  // it with respect to the heap min size as it's a lower bound (i.e.,
  // we'll try to make the capacity larger than it, not smaller).
  minimum_desired_capacity = MIN2(minimum_desired_capacity, max_heap_size);
  // Should not be less than the heap min size. No need to adjust it
  // with respect to the heap max size as it's an upper bound (i.e.,
  // we'll try to make the capacity smaller than it, not greater).
  maximum_desired_capacity = MAX2(maximum_desired_capacity, min_heap_size);

  if (capacity_after_gc < minimum_desired_capacity) {
    // Don't expand unless it's significant
    size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;

    log_debug(gc, ergo, heap)("Attempt heap expansion (capacity lower than min desired capacity). "
                              "Capacity: " SIZE_FORMAT "B occupancy: " SIZE_FORMAT "B live: " SIZE_FORMAT "B "
                              "min_desired_capacity: " SIZE_FORMAT "B (" UINTX_FORMAT " %%)",
                              capacity_after_gc, used_after_gc, used(), minimum_desired_capacity, MinHeapFreeRatio);

    expand(expand_bytes, _workers);

    // No expansion, now see if we want to shrink
  } else if (capacity_after_gc > maximum_desired_capacity) {
    // Capacity too large, compute shrinking size
    size_t shrink_bytes = capacity_after_gc - maximum_desired_capacity;

    log_debug(gc, ergo, heap)("Attempt heap shrinking (capacity higher than max desired capacity). "
                              "Capacity: " SIZE_FORMAT "B occupancy: " SIZE_FORMAT "B live: " SIZE_FORMAT "B "
                              "maximum_desired_capacity: " SIZE_FORMAT "B (" UINTX_FORMAT " %%)",
                              capacity_after_gc, used_after_gc, used(), maximum_desired_capacity, MaxHeapFreeRatio);

    shrink(shrink_bytes);
  }
}

HeapWord* G1CollectedHeap::satisfy_failed_allocation_helper(size_t word_size,
                                                            bool do_gc,
                                                            bool clear_all_soft_refs,
                                                            bool expect_null_mutator_alloc_region,
                                                            bool* gc_succeeded) {
  *gc_succeeded = true;
  // Let's attempt the allocation first.
  HeapWord* result =
    attempt_allocation_at_safepoint(word_size,
                                    expect_null_mutator_alloc_region);
  if (result != NULL) {
    return result;

// ... (code elided) ...

  aligned_shrink_bytes = align_down(aligned_shrink_bytes,
                                    HeapRegion::GrainBytes);
  uint num_regions_to_remove = (uint)(shrink_bytes / HeapRegion::GrainBytes);

  uint num_regions_removed = _hrm.shrink_by(num_regions_to_remove);
  size_t shrunk_bytes = num_regions_removed * HeapRegion::GrainBytes;
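  // Worked example (hypothetical numbers): with 4M regions
  // (HeapRegion::GrainBytes == 4M) and shrink_bytes == 10M,
  // num_regions_to_remove = 10M / 4M = 2; if both regions can be
  // uncommitted, shrunk_bytes = 2 * 4M = 8M.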

  log_debug(gc, ergo, heap)("Shrink the heap. requested shrinking amount: " SIZE_FORMAT "B aligned shrinking amount: " SIZE_FORMAT "B attempted shrinking amount: " SIZE_FORMAT "B",
                            shrink_bytes, aligned_shrink_bytes, shrunk_bytes);
  if (num_regions_removed > 0) {
    g1_policy()->record_new_heap_size(num_regions());
  } else {
    log_debug(gc, ergo, heap)("Did not shrink the heap (heap shrinking operation failed)");
  }
}

void G1CollectedHeap::shrink(size_t shrink_bytes) {
  _verifier->verify_region_sets_optional();

  // We should only reach here at the end of a Full GC or during Remark, which
  // means we should not be holding on to any GC alloc regions. The method
  // below will make sure of that and do any remaining clean up.
  _allocator->abandon_gc_alloc_regions();

  // Instead of tearing down / rebuilding the free lists here, we
  // could use the remove_all_pending() method on free_list to
  // remove only the ones that we need to remove.
  tear_down_region_sets(true /* free_list_only */);
  shrink_helper(shrink_bytes);
  rebuild_region_sets(true /* free_list_only */);

  _hrm.verify_optional();
  _verifier->verify_region_sets_optional();
}

class OldRegionSetChecker : public HeapRegionSetChecker {
public:
  void check_mt_safety() {
    // Master Old Set MT safety protocol:
    // (a) If we're at a safepoint, operations on the master old set
    // should be invoked:

// ... (code elided) ...

class RebuildRegionSetsClosure : public HeapRegionClosure {
private:
  bool _free_list_only;

  HeapRegionSet* _old_set;
  HeapRegionManager* _hrm;

  size_t _total_used;

public:
  RebuildRegionSetsClosure(bool free_list_only,
                           HeapRegionSet* old_set,
                           HeapRegionManager* hrm) :
    _free_list_only(free_list_only),
    _old_set(old_set), _hrm(hrm), _total_used(0) {
    assert(_hrm->num_free_regions() == 0, "pre-condition");
    if (!free_list_only) {
      assert(_old_set->is_empty(), "pre-condition");
    }
  }

  bool do_heap_region(HeapRegion* r) {
    if (r->is_empty()) {
      assert(r->rem_set()->is_empty(), "Empty regions should have empty remembered sets.");
      // Add free regions to the free list
      r->set_free();
      _hrm->insert_into_free_list(r);
    } else if (!_free_list_only) {
      assert(r->rem_set()->is_empty(), "At this point remembered sets must have been cleared.");

      if (r->is_archive() || r->is_humongous()) {
        // We ignore archive and humongous regions: their sets are left unchanged.
      } else {
        assert(r->is_young() || r->is_free() || r->is_old(), "invariant");
        // We move all remaining regions to old gen and register them as such.
        r->move_to_old();
        _old_set->add(r);
      }
      _total_used += r->used();
    }

    return false;
  }

  size_t total_used() {
    return _total_used;
  }
};
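
// Note: do_heap_region() returning false above means "keep iterating";
// heap_region_iterate() in rebuild_region_sets() below applies the closure
// to every region in the heap.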

void G1CollectedHeap::rebuild_region_sets(bool free_list_only) {
  assert_at_safepoint_on_vm_thread();

  if (!free_list_only) {
    _eden.clear();
    _survivor.clear();
  }

  RebuildRegionSetsClosure cl(free_list_only, &_old_set, &_hrm);
  heap_region_iterate(&cl);

  if (!free_list_only) {
    set_used(cl.total_used());
    if (_archive_allocator != NULL) {
      _archive_allocator->clear_used();
    }
  }
  assert(used() == recalculate_used(),
         "inconsistent used(), value: " SIZE_FORMAT " recalculated: " SIZE_FORMAT,
         used(), recalculate_used());
}

bool G1CollectedHeap::is_in_closed_subset(const void* p) const {
  HeapRegion* hr = heap_region_containing(p);
  return hr->is_in(p);
}

// Methods for the mutator alloc region

HeapRegion* G1CollectedHeap::new_mutator_alloc_region(size_t word_size,
                                                      bool force) {
  assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
  bool should_allocate = g1_policy()->should_allocate_mutator_region();
  if (force || should_allocate) {
    HeapRegion* new_alloc_region = new_region(word_size,
                                              false /* is_old */,
                                              false /* do_expand */);
    if (new_alloc_region != NULL) {
      set_region_short_lived_locked(new_alloc_region);
      _hr_printer.alloc(new_alloc_region, !should_allocate);