48 #include "gc/g1/g1StringDedup.hpp"
49 #include "gc/g1/g1YCTypes.hpp"
50 #include "gc/g1/heapRegion.inline.hpp"
51 #include "gc/g1/heapRegionRemSet.hpp"
52 #include "gc/g1/heapRegionSet.inline.hpp"
53 #include "gc/g1/suspendibleThreadSet.hpp"
54 #include "gc/g1/vm_operations_g1.hpp"
55 #include "gc/shared/gcHeapSummary.hpp"
56 #include "gc/shared/gcLocker.inline.hpp"
57 #include "gc/shared/gcTimer.hpp"
58 #include "gc/shared/gcTrace.hpp"
59 #include "gc/shared/gcTraceTime.hpp"
60 #include "gc/shared/generationSpec.hpp"
61 #include "gc/shared/isGCActiveMark.hpp"
62 #include "gc/shared/referenceProcessor.hpp"
63 #include "gc/shared/taskqueue.inline.hpp"
64 #include "memory/allocation.hpp"
65 #include "memory/iterator.hpp"
66 #include "oops/oop.inline.hpp"
67 #include "runtime/atomic.inline.hpp"
68 #include "runtime/init.hpp"
69 #include "runtime/orderAccess.inline.hpp"
70 #include "runtime/vmThread.hpp"
71 #include "utilities/globalDefinitions.hpp"
72 #include "utilities/stack.inline.hpp"
73
74 size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
75
76 // Turn this on so that the contents of the young list (scan-only /
77 // to-be-collected) are printed at "strategic" points before, during,
78 // and after the collection --- this is useful for debugging.
79 #define YOUNG_LIST_VERBOSE 0
80 // CURRENT STATUS
81 // This file is under construction. Search for "FIXME".
82
83 // INVARIANTS/NOTES
84 //
85 // All allocation activity covered by the G1CollectedHeap interface is
86 // serialized by acquiring the HeapLock. This happens in mem_allocate
87 // and allocate_new_tlab, which are the "entry" points to the
88 // allocation code from the rest of the JVM. (Note that this does not apply to TLAB allocation, which is not part of this interface: it is done by clients of this interface.)
933 // Call complete_archive to do the real work, filling in the MemRegion
934 // array with the archive regions.
935 _archive_allocator->complete_archive(ranges, end_alignment_in_bytes);
936 delete _archive_allocator;
937 _archive_allocator = NULL;
938 }
939
940 bool G1CollectedHeap::check_archive_addresses(MemRegion* ranges, size_t count) {
941 assert(ranges != NULL, "MemRegion array NULL");
942 assert(count != 0, "No MemRegions provided");
943 MemRegion reserved = _hrm.reserved();
944 for (size_t i = 0; i < count; i++) {
945 if (!reserved.contains(ranges[i].start()) || !reserved.contains(ranges[i].last())) {
946 return false;
947 }
948 }
949 return true;
950 }
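// A typical caller sequence for this archive interface (illustrative only;
// it assumes a CDS-style heap-archive loader drives these calls):
//
//   if (g1h->check_archive_addresses(ranges, count)) {
//     g1h->alloc_archive_regions(ranges, count);  // allocate + pin the regions
//     ... map or copy the archived heap data into 'ranges' ...
//     g1h->fill_archive_regions(ranges, count);   // make surrounding space parsable
//   }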
951
952 bool G1CollectedHeap::alloc_archive_regions(MemRegion* ranges, size_t count) {
953 assert(!is_init_completed(), "Expect to be called at JVM init time");
954 assert(ranges != NULL, "MemRegion array NULL");
955 assert(count != 0, "No MemRegions provided");
956 MutexLockerEx x(Heap_lock);
957
958 MemRegion reserved = _hrm.reserved();
959 HeapWord* prev_last_addr = NULL;
960 HeapRegion* prev_last_region = NULL;
961
962 // Temporarily disable pretouching of heap pages. This interface is used
963 // when mmap'ing archived heap data in, so pretouching would be wasted work.
964 FlagSetting fs(AlwaysPreTouch, false);
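  // (FlagSetting is an RAII helper: the saved AlwaysPreTouch value is restored
  // automatically when 'fs' goes out of scope at the end of this method.)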
965
966 // Enable archive object checking in G1MarkSweep. We have to let it know
967 // about each archive range, so that objects in those ranges aren't marked.
968 G1MarkSweep::enable_archive_object_check();
969
970 // For each specified MemRegion range, allocate the corresponding G1
971 // regions and mark them as archive regions. We expect the ranges in
972 // ascending starting address order, without overlap.
973 for (size_t i = 0; i < count; i++) {
1022 HeapRegion* last_region = _hrm.addr_to_region(last_address);
1023 prev_last_region = last_region;
1024
1025 while (curr_region != NULL) {
1026 assert(curr_region->is_empty() && !curr_region->is_pinned(),
1027 err_msg("Region already in use (index %u)", curr_region->hrm_index()));
1028 _hr_printer.alloc(curr_region, G1HRPrinter::Archive);
1029 curr_region->set_allocation_context(AllocationContext::system());
1030 curr_region->set_archive();
1031 _old_set.add(curr_region);
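      // Regions fully covered by the range have their top set to end(); the
      // last region's top is set to just past the last word of the range.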
1032 if (curr_region != last_region) {
1033 curr_region->set_top(curr_region->end());
1034 curr_region = _hrm.next_region_in_heap(curr_region);
1035 } else {
1036 curr_region->set_top(last_address + 1);
1037 curr_region = NULL;
1038 }
1039 }
1040
1041 // Notify mark-sweep of the archive range.
1042 G1MarkSweep::set_range_archive(curr_range, true);
1043 }
1044 return true;
1045 }
1046
1047 void G1CollectedHeap::fill_archive_regions(MemRegion* ranges, size_t count) {
1048 assert(!is_init_completed(), "Expect to be called at JVM init time");
1049 assert(ranges != NULL, "MemRegion array NULL");
1050 assert(count != 0, "No MemRegions provided");
1051 MemRegion reserved = _hrm.reserved();
1052 HeapWord* prev_last_addr = NULL;
1053 HeapRegion* prev_last_region = NULL;
1054
1055 // For each MemRegion, create filler objects, if needed, in the G1 regions
1056 // that contain the address range. The addresses within the MemRegion itself
1057 // are not modified; that memory is assumed to have been initialized
1058 // elsewhere, probably via an mmap of archived heap data.
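  // ("Filler objects" are the dummy objects HotSpot uses to keep otherwise
  // unused heap space parsable -- cf. CollectedHeap::fill_with_objects().)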
1059 MutexLockerEx x(Heap_lock);
1060 for (size_t i = 0; i < count; i++) {
1061 HeapWord* start_address = ranges[i].start();
1062 HeapWord* last_address = ranges[i].last();
1063
1064 assert(reserved.contains(start_address) && reserved.contains(last_address),
1065 err_msg("MemRegion outside of heap [" PTR_FORMAT ", " PTR_FORMAT "]",
1066 p2i(start_address), p2i(last_address)));
1067 assert(start_address > prev_last_addr,
1068 err_msg("Ranges not in ascending order: " PTR_FORMAT " <= " PTR_FORMAT ,
1109 uint* gc_count_before_ret,
1110 uint* gclocker_retry_count_ret) {
1111 assert_heap_not_locked_and_not_at_safepoint();
1112 assert(!is_humongous(word_size), "attempt_allocation() should not "
1113 "be called for humongous allocation requests");
1114
1115 AllocationContext_t context = AllocationContext::current();
1116 HeapWord* result = _allocator->attempt_allocation(word_size, context);
1117
1118 if (result == NULL) {
1119 result = attempt_allocation_slow(word_size,
1120 context,
1121 gc_count_before_ret,
1122 gclocker_retry_count_ret);
1123 }
1124 assert_heap_not_locked();
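  // Mark the cards covering the newly allocated block as young, so the G1
  // post-write barrier can cheaply filter out writes into this fresh block.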
1125 if (result != NULL) {
1126 dirty_young_block(result, word_size);
1127 }
1128 return result;
1129 }
1130
1131 void G1CollectedHeap::dealloc_archive_regions(MemRegion* ranges, size_t count) {
1132 assert(!is_init_completed(), "Expect to be called at JVM init time");
1133 assert(ranges != NULL, "MemRegion array NULL");
1134 assert(count != 0, "No MemRegions provided");
1135 MemRegion reserved = _hrm.reserved();
1136 HeapWord* prev_last_addr = NULL;
1137 HeapRegion* prev_last_region = NULL;
1138 size_t size_used = 0;
1139 size_t uncommitted_regions = 0;
1140
1141 // For each MemRegion, free the G1 regions that constitute it, and
1142 // notify mark-sweep that the range is no longer to be considered 'archive.'
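// (Presumably this is the undo path for alloc_archive_regions(): if the
// archived heap data cannot be used after all, the space is handed back.)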
1143 MutexLockerEx x(Heap_lock);
1144 for (size_t i = 0; i < count; i++) {
1145 HeapWord* start_address = ranges[i].start();
1146 HeapWord* last_address = ranges[i].last();
1147
1148 assert(reserved.contains(start_address) && reserved.contains(last_address),
1149 err_msg("MemRegion outside of heap [" PTR_FORMAT ", " PTR_FORMAT "]",
1150 p2i(start_address), p2i(last_address)));
1151 assert(start_address > prev_last_addr,
1152 err_msg("Ranges not in ascending order: " PTR_FORMAT " <= " PTR_FORMAT ,
1153 p2i(start_address), p2i(prev_last_addr)));
1154 size_used += ranges[i].byte_size();
1155 prev_last_addr = last_address;
1156
1157 HeapRegion* start_region = _hrm.addr_to_region(start_address);
1158 HeapRegion* last_region = _hrm.addr_to_region(last_address);
1159
1160 // Check for ranges that start in the same G1 region in which the previous
1161 // range ended, and adjust the start address so we don't try to free
1162 // the same region again. If the current range is entirely within that
1163 // region, skip it.
1164 if (start_region == prev_last_region) {
1165 start_address = start_region->end();
1166 if (start_address > last_address) {
1167 continue;
1168 }
1169 start_region = _hrm.addr_to_region(start_address);
1170 }
1171 prev_last_region = last_region;
1172
1173 // After verifying that each region was marked as an archive region by
1174 // alloc_archive_regions, set it free and empty, and then uncommit it.
1175 HeapRegion* curr_region = start_region;
1176 while (curr_region != NULL) {
1177 guarantee(curr_region->is_archive(),
1178 err_msg("Expected archive region at index %u", curr_region->hrm_index()));
1179 uint curr_index = curr_region->hrm_index();
1180 _old_set.remove(curr_region);
1181 curr_region->set_free();
1182 curr_region->set_top(curr_region->bottom());
1183 if (curr_region != last_region) {
1184 curr_region = _hrm.next_region_in_heap(curr_region);
1185 } else {
1186 curr_region = NULL;
1187 }
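      // shrink_at() uncommits the backing memory of the region at curr_index.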
1188 _hrm.shrink_at(curr_index);
1189 uncommitted_regions++;
1190 }
1191
1192 // Notify mark-sweep that this is no longer an archive range.
1193 G1MarkSweep::set_range_archive(ranges[i], false);
1194 }
1195
1196 if (uncommitted_regions != 0) {
1197 ergo_verbose1(ErgoHeapSizing,
1198 "attempt heap shrinking",
1199 ergo_format_reason("uncommitted archive regions")
1200 ergo_format_byte("total size"),
1201 HeapRegion::GrainWords * HeapWordSize * uncommitted_regions);
1202 }
1203 decrease_used(size_used);
1204 }
1205
1206 HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
1207 uint* gc_count_before_ret,
1208 uint* gclocker_retry_count_ret) {
1209 // The structure of this method has a lot of similarities to
1210 // attempt_allocation_slow(). The reason these two were not merged
1211 // into a single one is that such a method would require several "if
1212 // allocation is not humongous do this, otherwise do that"
1213 // conditional paths which would obscure its flow. In fact, an early
1214 // version of this code did use a unified method which was harder to
1215 // follow and, as a result, it had subtle bugs that were hard to
1216 // track down. So keeping these two methods separate allows each to
1217 // be more readable. It will be good to keep these two in sync as
1218 // much as possible.
1219
1220 assert_heap_not_locked_and_not_at_safepoint();
1221 assert(is_humongous(word_size), "attempt_allocation_humongous() "
1222 "should only be called for humongous allocations");
1223