    HeapRegion* last_region = _hrm.addr_to_region(last_address);
    prev_last_region = last_region;

    while (curr_region != NULL) {
      assert(curr_region->is_empty() && !curr_region->is_pinned(),
             err_msg("Region already in use (index %u)", curr_region->hrm_index()));
      _hr_printer.alloc(curr_region, G1HRPrinter::Archive);
      curr_region->set_allocation_context(AllocationContext::system());
      curr_region->set_archive();
      _old_set.add(curr_region);
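      // Every region in the range except the last is marked fully used by
      // setting top to end(); the last region's top is set to just past the
      // end of the requested range.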
      if (curr_region != last_region) {
        curr_region->set_top(curr_region->end());
        curr_region = _hrm.next_region_in_heap(curr_region);
      } else {
        curr_region->set_top(last_address + 1);
        curr_region = NULL;
      }
    }

    // Notify mark-sweep of the archive range.
    G1MarkSweep::mark_range_archive(curr_range, true);
  }
  return true;
}

void G1CollectedHeap::fill_archive_regions(MemRegion* ranges, size_t count) {
  assert(ranges != NULL, "MemRegion array NULL");
  assert(count != 0, "No MemRegions provided");
  MemRegion reserved = _hrm.reserved();
  HeapWord* prev_last_addr = NULL;
  HeapRegion* prev_last_region = NULL;

  // For each MemRegion, create filler objects, if needed, in the G1 regions
  // that contain the address range. The address range actually within the
  // MemRegion will not be modified. That is assumed to have been initialized
  // elsewhere, probably via an mmap of archived heap data.
  MutexLockerEx x(Heap_lock);
  for (size_t i = 0; i < count; i++) {
    HeapWord* start_address = ranges[i].start();
    HeapWord* last_address = ranges[i].last();

    // ... (intervening lines elided in this excerpt) ...
      if (curr_region != last_region) {
        curr_region = _hrm.next_region_in_heap(curr_region);
      } else {
        curr_region = NULL;
      }
    }

    prev_last_addr = last_address;
    prev_last_region = last_region;

    // Fill the memory below the allocated range with dummy object(s),
    // if the region bottom does not match the range start, or if the previous
    // range ended within the same G1 region, and there is a gap.
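    // fill_with_objects() writes dummy filler objects into that gap so the
    // containing region stays parseable by heap iteration.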
    if (start_address != bottom_address) {
      size_t fill_size = pointer_delta(start_address, bottom_address);
      G1CollectedHeap::fill_with_objects(bottom_address, fill_size);
      increase_used(fill_size * HeapWordSize);
    }
  }
}

void G1CollectedHeap::free_archive_regions(MemRegion* ranges, size_t count) {
  assert(ranges != NULL, "MemRegion array NULL");
  assert(count != 0, "No MemRegions provided");
  MemRegion reserved = _hrm.reserved();
  HeapWord* prev_last_addr = NULL;
  HeapRegion* prev_last_region = NULL;
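  // Freed regions are first collected in a local list and then prepended to
  // the master free list in a single operation once all ranges are processed.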
  FreeRegionList local_free_list("Local List for Freeing Archive Regions");
  size_t size_used = 0;

  // For each MemRegion, free the G1 regions that constitute it, and
  // notify mark-sweep that the range is no longer to be considered 'archive.'
  MutexLockerEx x(Heap_lock);
  for (size_t i = 0; i < count; i++) {
    HeapWord* start_address = ranges[i].start();
    HeapWord* last_address = ranges[i].last();

    assert(reserved.contains(start_address) && reserved.contains(last_address),
           err_msg("MemRegion outside of heap [" PTR_FORMAT ", " PTR_FORMAT "]",
                   p2i(start_address), p2i(last_address)));
    assert(start_address > prev_last_addr,
           err_msg("Ranges not in ascending order: " PTR_FORMAT " <= " PTR_FORMAT,
                   p2i(start_address), p2i(prev_last_addr)));
    size_used += ranges[i].word_size() * HeapWordSize;
    prev_last_addr = last_address;

    HeapRegion* start_region = _hrm.addr_to_region(start_address);
    HeapRegion* last_region = _hrm.addr_to_region(last_address);

    // Check for ranges that start in the same G1 region in which the previous
    // range ended, and adjust the start address so we don't try to free
    // the same region again. If the current range is entirely within that
    // region, skip it.
    if ((prev_last_region != NULL) && (start_region == prev_last_region)) {
      start_address = start_region->end();
      if (start_address > last_address) {
        continue;
      }
      start_region = _hrm.addr_to_region(start_address);
    }
    prev_last_region = last_region;

    // After verifying that each region was marked as an archive region by
    // alloc_archive_regions, free it.
    HeapRegion* curr_region = start_region;
    while (curr_region != NULL) {
      guarantee(curr_region->is_archive(),
                err_msg("Expected archive region at index %u", curr_region->hrm_index()));

      _old_set.remove(curr_region);
      free_region(curr_region, &local_free_list, false /* par */, true /* locked */);
      if (curr_region != last_region) {
        curr_region = _hrm.next_region_in_heap(curr_region);
      } else {
        curr_region = NULL;
      }
    }

    // Notify mark-sweep that this is no longer an archive range.
    G1MarkSweep::mark_range_archive(ranges[i], false);
  }

  prepend_to_freelist(&local_free_list);
  decrease_used(size_used);
}
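
// A rough usage sketch (not part of the original source): at runtime a caller
// might reserve the archive regions, initialize the archived data in place,
// fill the remainder, and later release the regions if the archived data
// cannot be used. 'g1h', 'archive_range', 'bottom' and 'word_size' below are
// illustrative placeholders, not identifiers defined in this file:
//
//   G1CollectedHeap* g1h = G1CollectedHeap::heap();
//   MemRegion archive_range(bottom, word_size);
//   if (g1h->alloc_archive_regions(&archive_range, 1)) {
//     // ... map or copy the archived heap data into the range ...
//     g1h->fill_archive_regions(&archive_range, 1);
//   }
//   // If the archived data turns out to be unusable:
//   g1h->free_archive_regions(&archive_range, 1);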


HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
                                                         uint* gc_count_before_ret,
                                                         uint* gclocker_retry_count_ret) {
  // The structure of this method has a lot of similarities to
  // attempt_allocation_slow(). The reason these two were not merged
  // into a single one is that such a method would require several "if
  // allocation is not humongous do this, otherwise do that"
  // conditional paths which would obscure its flow. In fact, an early
  // version of this code did use a unified method which was harder to
  // follow and, as a result, it had subtle bugs that were hard to
  // track down. So keeping these two methods separate allows each to
  // be more readable. It will be good to keep these two in sync as
  // much as possible.

  assert_heap_not_locked_and_not_at_safepoint();
  assert(is_humongous(word_size), "attempt_allocation_humongous() "
         "should only be called for humongous allocations");

  // Humongous objects can exhaust the heap quickly, so we should check if we