29 #include "gc/g1/g1MarkSweep.hpp"
30 #include "gc/g1/heapRegion.inline.hpp"
31 #include "gc/g1/heapRegionSet.inline.hpp"
32
33 void G1DefaultAllocator::init_mutator_alloc_region() {
34 assert(_mutator_alloc_region.get() == NULL, "pre-condition");
35 _mutator_alloc_region.init();
36 }
37
38 void G1DefaultAllocator::release_mutator_alloc_region() {
39 _mutator_alloc_region.release();
40 assert(_mutator_alloc_region.get() == NULL, "post-condition");
41 }
42
43 void G1Allocator::reuse_retained_old_region(EvacuationInfo& evacuation_info,
44 OldGCAllocRegion* old,
45 HeapRegion** retained_old) {
46 HeapRegion* retained_region = *retained_old;
47 *retained_old = NULL;
48 assert(retained_region == NULL || !retained_region->is_archive(),
49 "Archive region should not be alloc region");
50
51 // We will discard the current GC alloc region if:
52 // a) it's in the collection set (it can happen!),
53 // b) it's already full (no point in using it),
54 // c) it's empty (this means that it was emptied during
55 // a cleanup and it should be on the free list now), or
56 // d) it's humongous (this means that it was emptied
57 // during a cleanup and was added to the free list, but
58 // has been subsequently used to allocate a humongous
59 // object that may be less than the region size).
60 if (retained_region != NULL &&
61 !retained_region->in_collection_set() &&
62 !(retained_region->top() == retained_region->end()) &&
63 !retained_region->is_empty() &&
64 !retained_region->is_humongous()) {
65 retained_region->record_timestamp();
66 // The retained region was added to the old region set when it was
67 // retired. We have to remove it now, since we don't allow regions
68 // we allocate to in the region sets. We'll re-add it later, when
69 // it's retired again.
155 for (uint state = 0; state < InCSetState::Num; state++) {
156 G1PLAB* const buf = _alloc_buffers[state];
157 if (buf != NULL) {
158 buf->flush_and_retire_stats(_g1h->alloc_buffer_stats(state));
159 }
160 }
161 }
162
163 void G1DefaultParGCAllocator::waste(size_t& wasted, size_t& undo_wasted) {
164 wasted = 0;
165 undo_wasted = 0;
166 for (uint state = 0; state < InCSetState::Num; state++) {
167 G1PLAB * const buf = _alloc_buffers[state];
168 if (buf != NULL) {
169 wasted += buf->waste();
170 undo_wasted += buf->undo_waste();
171 }
172 }
173 }
174
175
176 G1ArchiveAllocator* G1ArchiveAllocator::create_allocator(G1CollectedHeap* g1h) {
177 // Create the archive allocator, and also enable archive object checking
178 // in mark-sweep, since we will be creating archive regions.
179 G1ArchiveAllocator* result = new G1ArchiveAllocator(g1h);
180 G1MarkSweep::enable_archive_object_check();
181 return result;
182 }
183
184 HeapRegion* G1ArchiveAllocator::alloc_new_region() {
185 // Allocate the highest available region in the reserved heap,
186 // and add it to our list of allocated regions. It is marked
187 // archive and added to the old set.
188 HeapRegion* hr = _g1h->alloc_highest_available_region();
189 assert(hr->top() == hr->bottom(), "expected empty region");
190 hr->set_archive();
191 _g1h->_old_set.add(hr);
192 _g1h->_hr_printer.alloc(hr, G1HRPrinter::Archive);
193 _allocated_regions.append(hr);
194 _allocation_region = hr;
195
196 // Set up _bottom and _max to begin allocating in the lowest
197 // min_region_size'd chunk of the allocated G1 region.
198 _bottom = hr->bottom();
199 _max = _bottom + HeapRegion::min_region_size_in_words();
200
201 // Tell mark-sweep that objects in this region are not to be marked.
202 G1MarkSweep::mark_range_archive(_bottom, hr->end() - 1);
203
204 // Since we've modified the old set, call update_sizes.
205 _g1h->g1mm()->update_sizes();
206 return hr;
207 }
208
209 HeapWord* G1ArchiveAllocator::archive_mem_allocate(size_t word_size) {
210 if (_allocation_region == NULL) {
211 alloc_new_region();
212 }
213 HeapWord* old_top = _allocation_region->top();
214 assert(_bottom >= _allocation_region->bottom(),
215 err_msg("inconsistent allocation state: " PTR_FORMAT " < " PTR_FORMAT,
216 p2i(_bottom), p2i(_allocation_region->bottom())));
217 assert(_max <= _allocation_region->end(),
218 err_msg("inconsistent allocation state: " PTR_FORMAT " > " PTR_FORMAT,
219 p2i(_max), p2i(_allocation_region->end())));
220 assert(_bottom <= old_top && old_top <= _max,
221 err_msg("inconsistent allocation state: expected "
222 PTR_FORMAT " <= " PTR_FORMAT " <= " PTR_FORMAT,
223 p2i(_bottom), p2i(old_top), p2i(_max)));
224
225 // Allocate the next word_size words in the current allocation chunk.
226 // If allocation would cross the _max boundary, insert a fill and begin
227 // at the base of the next min_region_size'd chunk. Also advance to the next
228 // chunk if we don't yet cross the boundary, but the remainder would be too
229 // small to fill.
230 HeapWord* new_top = old_top + word_size;
231 size_t remainder = (size_t)(_max - new_top);
232 if ((new_top > _max) ||
233 ((new_top < _max) && (remainder < CollectedHeap::min_fill_size()))) {
234 if (old_top != _max) {
235 size_t fill_size = _max - old_top;
236 CollectedHeap::fill_with_object(old_top, fill_size);
237 _summary_bytes_used += fill_size * HeapWordSize;
238 }
239 _allocation_region->set_top(_max);
240 old_top = _bottom = _max;
241
242 // Check if we've just used up the last min_region_size'd chunk
243 // in the current region, and if so, allocate a new one.
244 if (_bottom != _allocation_region->end()) {
245 _max = _bottom + HeapRegion::min_region_size_in_words();
246 } else {
247 alloc_new_region();
248 old_top = _allocation_region->bottom();
249 }
250 }
251 _allocation_region->set_top(old_top + word_size);
252 _summary_bytes_used += word_size * HeapWordSize;
253
254 return old_top;
255 }
256
257 void G1ArchiveAllocator::complete_archive(GrowableArray<MemRegion>* ranges,
258 uint end_alignment) {
259 assert((end_alignment >> LogHeapWordSize) < HeapRegion::min_region_size_in_words(),
260 "alignment too large");
261 // If we've allocated nothing, simply return.
262 if (_allocation_region == NULL) {
263 return;
264 }
265
266 // If an end alignment was requested, insert filler objects.
267 if (end_alignment != 0) {
268 HeapWord* currtop = _allocation_region->top();
269 HeapWord* newtop = (HeapWord*)round_to((intptr_t)currtop, end_alignment);
270 size_t fill_size = newtop - currtop;
271 if (fill_size != 0) {
272 HeapWord* fill = archive_mem_allocate(fill_size);
273 CollectedHeap::fill_with_objects(fill, fill_size);
274 }
275 }
276
277 // Loop through the allocated regions, and create MemRegions summarizing
278 // the allocated address range, combining contiguous ranges. Add the
279 // MemRegions to the growable array provided by the caller.
280 int index = _allocated_regions.length() - 1;
281 assert(_allocated_regions.at(index) == _allocation_region, "expect current region at end of array");
282 HeapWord* base_address = _allocation_region->bottom();
283 HeapWord* top = base_address;
284
285 while (index >= 0) {
286 HeapRegion* next = _allocated_regions.at(index--);
287 HeapWord* new_base = next->bottom();
288 HeapWord* new_top = next->top();
289 if (new_base != top) {
290 ranges->append(MemRegion(base_address, top - base_address));
291 base_address = new_base;
292 }
293 top = new_top;
294 }
295
296 ranges->append(MemRegion(base_address, top - base_address));
297 _allocated_regions.clear();
298 _allocation_region = NULL;
299
300 return;
301
302 };
|
29 #include "gc/g1/g1MarkSweep.hpp"
30 #include "gc/g1/heapRegion.inline.hpp"
31 #include "gc/g1/heapRegionSet.inline.hpp"
32
33 void G1DefaultAllocator::init_mutator_alloc_region() {
34 assert(_mutator_alloc_region.get() == NULL, "pre-condition");
35 _mutator_alloc_region.init();
36 }
37
38 void G1DefaultAllocator::release_mutator_alloc_region() {
39 _mutator_alloc_region.release();
40 assert(_mutator_alloc_region.get() == NULL, "post-condition");
41 }
42
43 void G1Allocator::reuse_retained_old_region(EvacuationInfo& evacuation_info,
44 OldGCAllocRegion* old,
45 HeapRegion** retained_old) {
46 HeapRegion* retained_region = *retained_old;
47 *retained_old = NULL;
48 assert(retained_region == NULL || !retained_region->is_archive(),
49 err_msg("Archive region should not be alloc region (index %u)", retained_region->hrm_index()));
50
51 // We will discard the current GC alloc region if:
52 // a) it's in the collection set (it can happen!),
53 // b) it's already full (no point in using it),
54 // c) it's empty (this means that it was emptied during
55 // a cleanup and it should be on the free list now), or
56 // d) it's humongous (this means that it was emptied
57 // during a cleanup and was added to the free list, but
58 // has been subsequently used to allocate a humongous
59 // object that may be less than the region size).
60 if (retained_region != NULL &&
61 !retained_region->in_collection_set() &&
62 !(retained_region->top() == retained_region->end()) &&
63 !retained_region->is_empty() &&
64 !retained_region->is_humongous()) {
65 retained_region->record_timestamp();
66 // The retained region was added to the old region set when it was
67 // retired. We have to remove it now, since we don't allow regions
68 // we allocate to in the region sets. We'll re-add it later, when
69 // it's retired again.
155 for (uint state = 0; state < InCSetState::Num; state++) {
156 G1PLAB* const buf = _alloc_buffers[state];
157 if (buf != NULL) {
158 buf->flush_and_retire_stats(_g1h->alloc_buffer_stats(state));
159 }
160 }
161 }
162
163 void G1DefaultParGCAllocator::waste(size_t& wasted, size_t& undo_wasted) {
164 wasted = 0;
165 undo_wasted = 0;
166 for (uint state = 0; state < InCSetState::Num; state++) {
167 G1PLAB * const buf = _alloc_buffers[state];
168 if (buf != NULL) {
169 wasted += buf->waste();
170 undo_wasted += buf->undo_waste();
171 }
172 }
173 }
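
// Illustrative use of waste() (a sketch; the caller and variable names
// below are assumed, not part of this file). Both out-parameters are
// filled in by summing over the per-destination PLABs above:
//
//   size_t wasted, undo_wasted;
//   par_alloc->waste(wasted, undo_wasted);
//   // wasted      : PLAB space retired unused, in words
//   // undo_wasted : space lost to undone (rolled-back) PLAB allocations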

G1ArchiveAllocator* G1ArchiveAllocator::create_allocator(G1CollectedHeap* g1h) {
  // Create the archive allocator, and also enable archive object checking
  // in mark-sweep, since we will be creating archive regions.
  G1ArchiveAllocator* result = new G1ArchiveAllocator(g1h);
  G1MarkSweep::enable_archive_object_check();
  return result;
}

bool G1ArchiveAllocator::alloc_new_region() {
  // Allocate the highest free region in the reserved heap,
  // and add it to our list of allocated regions. It is marked
  // archive and added to the old set.
  HeapRegion* hr = _g1h->alloc_highest_free_region();
  if (hr == NULL) {
    return false;
  }
  assert(hr->is_empty(), err_msg("expected empty region (index %u)", hr->hrm_index()));
  hr->set_archive();
  _g1h->_old_set.add(hr);
  _g1h->_hr_printer.alloc(hr, G1HRPrinter::Archive);
  _allocated_regions.append(hr);
  _allocation_region = hr;

  // Set up _bottom and _max to begin allocating in the lowest
  // min_region_size'd chunk of the allocated G1 region.
  _bottom = hr->bottom();
  _max = _bottom + HeapRegion::min_region_size_in_words();

  // Tell mark-sweep that objects in this region are not to be marked.
  G1MarkSweep::mark_range_archive(MemRegion(_bottom, HeapRegion::GrainWords));

  // Since we've modified the old set, call update_sizes.
  _g1h->g1mm()->update_sizes();
  return true;
}
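
// (Assumed rationale, not stated in this file: confining allocation to
// min_region_size'd chunks should keep the archived layout valid even if
// the image is later mapped into a heap configured with a larger G1
// region size.)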

HeapWord* G1ArchiveAllocator::archive_mem_allocate(size_t word_size) {
  assert(word_size != 0, "size must not be zero");
  if (_allocation_region == NULL) {
    if (!alloc_new_region()) {
      return NULL;
    }
  }
  HeapWord* old_top = _allocation_region->top();
  assert(_bottom >= _allocation_region->bottom(),
         err_msg("inconsistent allocation state: " PTR_FORMAT " < " PTR_FORMAT,
                 p2i(_bottom), p2i(_allocation_region->bottom())));
  assert(_max <= _allocation_region->end(),
         err_msg("inconsistent allocation state: " PTR_FORMAT " > " PTR_FORMAT,
                 p2i(_max), p2i(_allocation_region->end())));
  assert(_bottom <= old_top && old_top <= _max,
         err_msg("inconsistent allocation state: expected "
                 PTR_FORMAT " <= " PTR_FORMAT " <= " PTR_FORMAT,
                 p2i(_bottom), p2i(old_top), p2i(_max)));

  // Allocate the next word_size words in the current allocation chunk.
  // If allocation would cross the _max boundary, insert a filler and begin
  // at the base of the next min_region_size'd chunk. Also advance to the next
  // chunk if we don't yet cross the boundary, but the remainder would be too
  // small to fill.
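  // Illustrative trace of the two cases above (numbers assumed, not from
  // the source; say _bottom = 0, _max = 1024 words, min_fill_size() = 2):
  //  - old_top = 1000, word_size = 30: new_top = 1030 > _max, so words
  //    [1000, 1024) are filled and allocation restarts in the next chunk.
  //  - old_top = 1000, word_size = 23: new_top = 1023 < _max, but the
  //    1-word remainder is too small for a filler object, so we advance
  //    to the next chunk as well.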
  HeapWord* new_top = old_top + word_size;
  // Only compute the remainder once new_top is known not to be past _max;
  // pointer_delta() asserts against underflow in debug builds.
  if ((new_top > _max) ||
      ((new_top < _max) &&
       (pointer_delta(_max, new_top) < CollectedHeap::min_fill_size()))) {
    if (old_top != _max) {
      size_t fill_size = pointer_delta(_max, old_top);
      CollectedHeap::fill_with_object(old_top, fill_size);
      _summary_bytes_used += fill_size * HeapWordSize;
    }
    _allocation_region->set_top(_max);
    old_top = _bottom = _max;

    // Check if we've just used up the last min_region_size'd chunk
    // in the current region, and if so, allocate a new one.
    if (_bottom != _allocation_region->end()) {
      _max = _bottom + HeapRegion::min_region_size_in_words();
    } else {
      if (!alloc_new_region()) {
        return NULL;
      }
      old_top = _allocation_region->bottom();
    }
  }
  _allocation_region->set_top(old_top + word_size);
  _summary_bytes_used += word_size * HeapWordSize;

  return old_top;
}

void G1ArchiveAllocator::complete_archive(GrowableArray<MemRegion>* ranges,
                                          size_t end_alignment_in_bytes) {
  assert((end_alignment_in_bytes >> LogHeapWordSize) < HeapRegion::min_region_size_in_words(),
         err_msg("alignment " SIZE_FORMAT " too large", end_alignment_in_bytes));
  assert(is_size_aligned(end_alignment_in_bytes, HeapWordSize),
         err_msg("alignment " SIZE_FORMAT " is not HeapWord (%u) aligned", end_alignment_in_bytes, HeapWordSize));

  // If we've allocated nothing, simply return.
  if (_allocation_region == NULL) {
    return;
  }

  // If an end alignment was requested, insert filler objects.
  if (end_alignment_in_bytes != 0) {
    HeapWord* currtop = _allocation_region->top();
    HeapWord* newtop = (HeapWord*)align_pointer_up(currtop, end_alignment_in_bytes);
    size_t fill_size = pointer_delta(newtop, currtop);
    if (fill_size != 0) {
      if (fill_size < CollectedHeap::min_fill_size()) {
        // If the required fill is smaller than we can represent,
        // bump up to the next aligned address. We know we won't exceed the current
        // region boundary because the max supported alignment is smaller than the min
        // region size, and because the allocation code never leaves space smaller than
        // the min_fill_size at the top of the current allocation region.
        newtop = (HeapWord*)align_pointer_up(currtop + CollectedHeap::min_fill_size(),
                                             end_alignment_in_bytes);
        fill_size = pointer_delta(newtop, currtop);
      }
      HeapWord* fill = archive_mem_allocate(fill_size);
      CollectedHeap::fill_with_objects(fill, fill_size);
    }
  }
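  // Illustrative example (numbers assumed, not from this file): with
  // HeapWordSize = 8 and end_alignment_in_bytes = 64, a top() that is
  // 24 bytes past a 64-byte boundary needs a 40-byte (5-word) filler.
  // Were 5 words below min_fill_size(), the fill would instead extend to
  // the following 64-byte boundary (5 + 8 = 13 words).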

  // Loop through the allocated regions, and create MemRegions summarizing
  // the allocated address range, combining contiguous ranges. Add the
  // MemRegions to the GrowableArray provided by the caller.
  int index = _allocated_regions.length() - 1;
  assert(_allocated_regions.at(index) == _allocation_region,
         err_msg("expected region %u at end of array, found %u",
                 _allocation_region->hrm_index(), _allocated_regions.at(index)->hrm_index()));
  HeapWord* base_address = _allocation_region->bottom();
  HeapWord* top = base_address;

  while (index >= 0) {
    HeapRegion* next = _allocated_regions.at(index);
    HeapWord* new_base = next->bottom();
    HeapWord* new_top = next->top();
    if (new_base != top) {
      ranges->append(MemRegion(base_address, pointer_delta(top, base_address)));
      base_address = new_base;
    }
    top = new_top;
    index--;
  }

  assert(top != base_address, err_msg("zero-sized range, address " PTR_FORMAT, p2i(base_address)));
  ranges->append(MemRegion(base_address, pointer_delta(top, base_address)));
  _allocated_regions.clear();
  _allocation_region = NULL;
}
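
// Illustrative end-to-end use of G1ArchiveAllocator (a sketch; the caller
// code and variable names below are assumed, not part of this file):
//
//   G1ArchiveAllocator* alloc = G1ArchiveAllocator::create_allocator(g1h);
//   HeapWord* p = alloc->archive_mem_allocate(word_size);
//   if (p == NULL) {
//     ... // no free region could be allocated for the archive
//   }
//   ... // initialize the archived object(s) at p
//   GrowableArray<MemRegion> ranges(2);
//   alloc->complete_archive(&ranges, end_alignment_in_bytes);
//   // 'ranges' now summarizes the contiguous archived address ranges.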