  _survivor_gc_alloc_region(heap->alloc_buffer_stats(InCSetState::Young)),
  _old_gc_alloc_region(heap->alloc_buffer_stats(InCSetState::Old)) {
}

void G1DefaultAllocator::init_mutator_alloc_region() {
  assert(_mutator_alloc_region.get() == NULL, "pre-condition");
  _mutator_alloc_region.init();
}

void G1DefaultAllocator::release_mutator_alloc_region() {
  _mutator_alloc_region.release();
  assert(_mutator_alloc_region.get() == NULL, "post-condition");
}

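// Decide whether the old GC alloc region retained from the previous
// collection can be reused as this collection's initial old GC alloc
// region, or whether it must be discarded.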
void G1Allocator::reuse_retained_old_region(EvacuationInfo& evacuation_info,
                                            OldGCAllocRegion* old,
                                            HeapRegion** retained_old) {
  HeapRegion* retained_region = *retained_old;
  *retained_old = NULL;
  assert(retained_region == NULL || !retained_region->is_archive(),
         err_msg("Archive region should not be alloc region (index %u)", retained_region->hrm_index()));

  // We will discard the current GC alloc region if:
  // a) it's in the collection set (it can happen!),
  // b) it's already full (no point in using it),
  // c) it's empty (this means that it was emptied during
  // a cleanup and it should be on the free list now), or
  // d) it's humongous (this means that it was emptied
  // during a cleanup and was added to the free list, but
  // has been subsequently used to allocate a humongous
  // object that may be less than the region size).
  if (retained_region != NULL &&
      !retained_region->in_collection_set() &&
      !(retained_region->top() == retained_region->end()) &&
      !retained_region->is_empty() &&
      !retained_region->is_humongous()) {
    retained_region->record_timestamp();
    // The retained region was added to the old region set when it was
    // retired. We have to remove it now, since we don't allow regions
    // we allocate to in the region sets. We'll re-add it later, when
    // it's retired again.

// ...

size_t G1Allocator::unsafe_max_tlab_alloc(AllocationContext_t context) {
  // Also, this value can be at most the humongous object threshold,
  // since we can't allow tlabs to grow big enough to accommodate
  // humongous objects.

  HeapRegion* hr = mutator_alloc_region(context)->get();
  size_t max_tlab = _g1h->max_tlab_size() * wordSize;
  if (hr == NULL) {
    return max_tlab;
  } else {
    return MIN2(MAX2(hr->free(), (size_t) MinTLABSize), max_tlab);
  }
}

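// Fixed-size variant: allocate exactly word_size words in the given
// destination; the assert checks that the underlying allocation returned
// exactly the requested size.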
HeapWord* G1Allocator::par_allocate_during_gc(InCSetState dest,
                                              size_t word_size,
                                              AllocationContext_t context) {
  size_t temp = 0;
  HeapWord* result = par_allocate_during_gc(dest, word_size, word_size, &temp, context);
  assert(result == NULL || temp == word_size,
         err_msg("Requested " SIZE_FORMAT " words, but got " SIZE_FORMAT " at " PTR_FORMAT,
                 word_size, temp, p2i(result)));
  return result;
}

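// Flexible variant: allocate between min_word_size and desired_word_size
// words, dispatching on the destination state and reporting the actual
// size obtained via actual_word_size.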
HeapWord* G1Allocator::par_allocate_during_gc(InCSetState dest,
                                              size_t min_word_size,
                                              size_t desired_word_size,
                                              size_t* actual_word_size,
                                              AllocationContext_t context) {
  switch (dest.value()) {
    case InCSetState::Young:
      return survivor_attempt_allocation(min_word_size, desired_word_size, actual_word_size, context);
    case InCSetState::Old:
      return old_attempt_allocation(min_word_size, desired_word_size, actual_word_size, context);
    default:
      ShouldNotReachHere();
      return NULL; // Keep some compilers happy
  }
}

bool G1Allocator::survivor_is_full(AllocationContext_t context) const {

// ...

HeapWord* G1PLABAllocator::allocate_direct_or_new_plab(InCSetState dest,
                                                       size_t word_sz,
                                                       AllocationContext_t context,
                                                       bool* plab_refill_failed) {
  size_t plab_word_size = G1CollectedHeap::heap()->desired_plab_sz(dest);
  size_t required_in_plab = PLAB::size_required_for_allocation(word_sz);

  // Only get a new PLAB if the allocation fits and it would not waste more than
  // ParallelGCBufferWastePct in the existing buffer.
  if ((required_in_plab <= plab_word_size) &&
      may_throw_away_buffer(required_in_plab, plab_word_size)) {

    G1PLAB* alloc_buf = alloc_buffer(dest, context);
    alloc_buf->retire();

    size_t actual_plab_size = 0;
    HeapWord* buf = _allocator->par_allocate_during_gc(dest,
                                                       required_in_plab,
                                                       plab_word_size,
                                                       &actual_plab_size,
                                                       context);

    assert(buf == NULL || ((actual_plab_size >= required_in_plab) && (actual_plab_size <= plab_word_size)),
           err_msg("Requested at minimum " SIZE_FORMAT ", desired " SIZE_FORMAT " words, but got " SIZE_FORMAT " at " PTR_FORMAT,
                   required_in_plab, plab_word_size, actual_plab_size, p2i(buf)));

    if (buf != NULL) {
      alloc_buf->set_buf(buf, actual_plab_size);

      HeapWord* const obj = alloc_buf->allocate(word_sz);
      assert(obj != NULL, err_msg("PLAB should have been big enough, tried to allocate "
                                  SIZE_FORMAT " requiring " SIZE_FORMAT " PLAB size " SIZE_FORMAT,
                                  word_sz, required_in_plab, plab_word_size));
      return obj;
    }
    // Otherwise.
    *plab_refill_failed = true;
  }
  // Try direct allocation.
  HeapWord* result = _allocator->par_allocate_during_gc(dest, word_sz, context);
  if (result != NULL) {
    _direct_allocated[dest.value()] += word_sz;
  }
  return result;
}

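// Return obj of word_sz words to the PLAB it came from, undoing the most
// recent allocation so the space can be handed out again.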
void G1PLABAllocator::undo_allocation(InCSetState dest, HeapWord* obj, size_t word_sz, AllocationContext_t context) {
  alloc_buffer(dest, context)->undo_allocation(obj, word_sz);
}

G1DefaultPLABAllocator::G1DefaultPLABAllocator(G1Allocator* allocator) :
  G1PLABAllocator(allocator),
  _surviving_alloc_buffer(_g1h->desired_plab_sz(InCSetState::Young)),

// ...

    }
  }
}

G1ArchiveAllocator* G1ArchiveAllocator::create_allocator(G1CollectedHeap* g1h) {
  // Create the archive allocator, and also enable archive object checking
  // in mark-sweep, since we will be creating archive regions.
  G1ArchiveAllocator* result = new G1ArchiveAllocator(g1h);
  G1MarkSweep::enable_archive_object_check();
  return result;
}

bool G1ArchiveAllocator::alloc_new_region() {
  // Allocate the highest free region in the reserved heap,
  // and add it to our list of allocated regions. It is marked
  // archive and added to the old set.
  HeapRegion* hr = _g1h->alloc_highest_free_region();
  if (hr == NULL) {
    return false;
  }
  assert(hr->is_empty(), err_msg("expected empty region (index %u)", hr->hrm_index()));
  hr->set_archive();
  _g1h->old_set_add(hr);
  _g1h->hr_printer()->alloc(hr, G1HRPrinter::Archive);
  _allocated_regions.append(hr);
  _allocation_region = hr;

  // Set up _bottom and _max to begin allocating in the lowest
  // min_region_size'd chunk of the allocated G1 region.
  _bottom = hr->bottom();
  _max = _bottom + HeapRegion::min_region_size_in_words();

  // Tell mark-sweep that objects in this region are not to be marked.
  G1MarkSweep::set_range_archive(MemRegion(_bottom, HeapRegion::GrainWords), true);

  // Since we've modified the old set, call update_sizes.
  _g1h->g1mm()->update_sizes();
  return true;
}

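// Allocate word_size words within the current archive region, working
// through it one min_region_size'd chunk at a time and padding any
// unusable chunk tail with a filler object.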
HeapWord* G1ArchiveAllocator::archive_mem_allocate(size_t word_size) {
  assert(word_size != 0, "size must not be zero");
  if (_allocation_region == NULL) {
    if (!alloc_new_region()) {
      return NULL;
    }
  }
  HeapWord* old_top = _allocation_region->top();
  assert(_bottom >= _allocation_region->bottom(),
         err_msg("inconsistent allocation state: " PTR_FORMAT " < " PTR_FORMAT,
                 p2i(_bottom), p2i(_allocation_region->bottom())));
  assert(_max <= _allocation_region->end(),
         err_msg("inconsistent allocation state: " PTR_FORMAT " > " PTR_FORMAT,
                 p2i(_max), p2i(_allocation_region->end())));
  assert(_bottom <= old_top && old_top <= _max,
         err_msg("inconsistent allocation state: expected "
                 PTR_FORMAT " <= " PTR_FORMAT " <= " PTR_FORMAT,
                 p2i(_bottom), p2i(old_top), p2i(_max)));

  // Allocate the next word_size words in the current allocation chunk.
  // If allocation would cross the _max boundary, insert a filler and begin
  // at the base of the next min_region_size'd chunk. Also advance to the next
  // chunk if we don't yet cross the boundary, but the remainder would be too
  // small to fill.
  HeapWord* new_top = old_top + word_size;
  size_t remainder = pointer_delta(_max, new_top);
  if ((new_top > _max) ||
      ((new_top < _max) && (remainder < CollectedHeap::min_fill_size()))) {
    if (old_top != _max) {
      size_t fill_size = pointer_delta(_max, old_top);
      CollectedHeap::fill_with_object(old_top, fill_size);
      _summary_bytes_used += fill_size * HeapWordSize;
    }
    _allocation_region->set_top(_max);
    old_top = _bottom = _max;

    // Check if we've just used up the last min_region_size'd chunk
    // in the current region, and if so, allocate a new one.
    if (_bottom != _allocation_region->end()) {
      _max = _bottom + HeapRegion::min_region_size_in_words();
    } else {
      if (!alloc_new_region()) {
        return NULL;
      }
      old_top = _allocation_region->bottom();
    }
  }
  _allocation_region->set_top(old_top + word_size);
  _summary_bytes_used += word_size * HeapWordSize;

  return old_top;
}

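// Finish the archive: pad the final region out to the requested end
// alignment, then summarize the allocated address ranges as MemRegions
// in the caller-supplied array.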
void G1ArchiveAllocator::complete_archive(GrowableArray<MemRegion>* ranges,
                                          size_t end_alignment_in_bytes) {
  assert((end_alignment_in_bytes >> LogHeapWordSize) < HeapRegion::min_region_size_in_words(),
         err_msg("alignment " SIZE_FORMAT " too large", end_alignment_in_bytes));
  assert(is_size_aligned(end_alignment_in_bytes, HeapWordSize),
         err_msg("alignment " SIZE_FORMAT " is not HeapWord (%u) aligned", end_alignment_in_bytes, HeapWordSize));

  // If we've allocated nothing, simply return.
  if (_allocation_region == NULL) {
    return;
  }

  // If an end alignment was requested, insert filler objects.
  if (end_alignment_in_bytes != 0) {
    HeapWord* currtop = _allocation_region->top();
    HeapWord* newtop = (HeapWord*)align_pointer_up(currtop, end_alignment_in_bytes);
    size_t fill_size = pointer_delta(newtop, currtop);
    if (fill_size != 0) {
      if (fill_size < CollectedHeap::min_fill_size()) {
        // If the required fill is smaller than we can represent,
        // bump up to the next aligned address. We know we won't exceed the current
        // region boundary because the max supported alignment is smaller than the min
        // region size, and because the allocation code never leaves space smaller than
        // the min_fill_size at the top of the current allocation region.
        newtop = (HeapWord*)align_pointer_up(currtop + CollectedHeap::min_fill_size(),
                                             end_alignment_in_bytes);
        fill_size = pointer_delta(newtop, currtop);
      }
      HeapWord* fill = archive_mem_allocate(fill_size);
      CollectedHeap::fill_with_objects(fill, fill_size);
    }
  }

  // Loop through the allocated regions, and create MemRegions summarizing
  // the allocated address range, combining contiguous ranges. Add the
  // MemRegions to the GrowableArray provided by the caller.
  int index = _allocated_regions.length() - 1;
  assert(_allocated_regions.at(index) == _allocation_region,
         err_msg("expected region %u at end of array, found %u",
                 _allocation_region->hrm_index(), _allocated_regions.at(index)->hrm_index()));
  HeapWord* base_address = _allocation_region->bottom();
  HeapWord* top = base_address;

  while (index >= 0) {
    HeapRegion* next = _allocated_regions.at(index);
    HeapWord* new_base = next->bottom();
    HeapWord* new_top = next->top();
    if (new_base != top) {
      ranges->append(MemRegion(base_address, pointer_delta(top, base_address)));
      base_address = new_base;
    }
    top = new_top;
    index = index - 1;
  }

  assert(top != base_address, err_msg("zero-sized range, address " PTR_FORMAT, p2i(base_address)));
  ranges->append(MemRegion(base_address, pointer_delta(top, base_address)));
  _allocated_regions.clear();
  _allocation_region = NULL;
}