
src/hotspot/share/gc/g1/g1Allocator.cpp

Old version:

  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc/g1/g1Allocator.inline.hpp"
  27 #include "gc/g1/g1AllocRegion.inline.hpp"
  28 #include "gc/g1/g1EvacStats.inline.hpp"
  29 #include "gc/g1/g1CollectedHeap.inline.hpp"
  30 #include "gc/g1/g1Policy.hpp"
  31 #include "gc/g1/heapRegion.inline.hpp"
  32 #include "gc/g1/heapRegionSet.inline.hpp"
  33 #include "gc/g1/heapRegionType.hpp"
  34 #include "utilities/align.hpp"
  35 
  36 G1Allocator::G1Allocator(G1CollectedHeap* heap) :
  37   _g1h(heap),
  38   _survivor_is_full(false),
  39   _old_is_full(false),
  40   _mutator_alloc_region(),
  41   _survivor_gc_alloc_region(heap->alloc_buffer_stats(InCSetState::Young)),
  42   _old_gc_alloc_region(heap->alloc_buffer_stats(InCSetState::Old)),
  43   _retained_old_gc_alloc_region(NULL) {
  44 }
  45 
  46 void G1Allocator::init_mutator_alloc_region() {
  47   assert(_mutator_alloc_region.get() == NULL, "pre-condition");
  48   _mutator_alloc_region.init();
  49 }
  50 
  51 void G1Allocator::release_mutator_alloc_region() {
  52   _mutator_alloc_region.release();
  53   assert(_mutator_alloc_region.get() == NULL, "post-condition");


 389   HeapWord* old_top = _allocation_region->top();
 390   assert(_bottom >= _allocation_region->bottom(),
 391          "inconsistent allocation state: " PTR_FORMAT " < " PTR_FORMAT,
 392          p2i(_bottom), p2i(_allocation_region->bottom()));
 393   assert(_max <= _allocation_region->end(),
 394          "inconsistent allocation state: " PTR_FORMAT " > " PTR_FORMAT,
 395          p2i(_max), p2i(_allocation_region->end()));
 396   assert(_bottom <= old_top && old_top <= _max,
 397          "inconsistent allocation state: expected "
 398          PTR_FORMAT " <= " PTR_FORMAT " <= " PTR_FORMAT,
 399          p2i(_bottom), p2i(old_top), p2i(_max));
 400 
 401   // Allocate the next word_size words in the current allocation chunk.
 402   // If allocation would cross the _max boundary, insert a filler and begin
 403   // at the base of the next min_region_size'd chunk. Also advance to the next
 404   // chunk if we don't yet cross the boundary, but the remainder would be too
 405   // small to fill.
 406   HeapWord* new_top = old_top + word_size;
 407   size_t remainder = pointer_delta(_max, new_top);
 408   if ((new_top > _max) ||
 409       ((new_top < _max) && (remainder < CollectedHeap::min_fill_size()))) {
 410     if (old_top != _max) {
 411       size_t fill_size = pointer_delta(_max, old_top);
 412       CollectedHeap::fill_with_object(old_top, fill_size);
 413       _summary_bytes_used += fill_size * HeapWordSize;
 414     }
 415     _allocation_region->set_top(_max);
 416     old_top = _bottom = _max;
 417 
 418     // Check if we've just used up the last min_region_size'd chunk
 419     // in the current region, and if so, allocate a new one.
 420     if (_bottom != _allocation_region->end()) {
 421       _max = _bottom + HeapRegion::min_region_size_in_words();
 422     } else {
 423       if (!alloc_new_region()) {
 424         return NULL;
 425       }
 426       old_top = _allocation_region->bottom();
 427     }
 428   }
 429   _allocation_region->set_top(old_top + word_size);
 430   _summary_bytes_used += word_size * HeapWordSize;
 431 
 432   return old_top;
 433 }
 434 
 435 void G1ArchiveAllocator::complete_archive(GrowableArray<MemRegion>* ranges,
 436                                           size_t end_alignment_in_bytes) {
 437   assert((end_alignment_in_bytes >> LogHeapWordSize) < HeapRegion::min_region_size_in_words(),
 438          "alignment " SIZE_FORMAT " too large", end_alignment_in_bytes);
 439   assert(is_aligned(end_alignment_in_bytes, HeapWordSize),
 440          "alignment " SIZE_FORMAT " is not HeapWord (%u) aligned", end_alignment_in_bytes, HeapWordSize);
 441 
 442   // If we've allocated nothing, simply return.
 443   if (_allocation_region == NULL) {
 444     return;
 445   }
 446 
 447   // If an end alignment was requested, insert filler objects.
 448   if (end_alignment_in_bytes != 0) {
 449     HeapWord* currtop = _allocation_region->top();
 450     HeapWord* newtop = align_up(currtop, end_alignment_in_bytes);
 451     size_t fill_size = pointer_delta(newtop, currtop);
 452     if (fill_size != 0) {
 453       if (fill_size < CollectedHeap::min_fill_size()) {
 454         // If the required fill is smaller than we can represent,
 455         // bump up to the next aligned address. We know we won't exceed the current
 456         // region boundary because the max supported alignment is smaller than the min
 457         // region size, and because the allocation code never leaves space smaller than
 458         // the min_fill_size at the top of the current allocation region.
 459         newtop = align_up(currtop + CollectedHeap::min_fill_size(),
 460                           end_alignment_in_bytes);
 461         fill_size = pointer_delta(newtop, currtop);
 462       }
 463       HeapWord* fill = archive_mem_allocate(fill_size);
 464       CollectedHeap::fill_with_objects(fill, fill_size);
 465     }
 466   }
 467 
 468   // Loop through the allocated regions, and create MemRegions summarizing
 469   // the allocated address range, combining contiguous ranges. Add the
 470   // MemRegions to the GrowableArray provided by the caller.
 471   int index = _allocated_regions.length() - 1;
 472   assert(_allocated_regions.at(index) == _allocation_region,
 473          "expected region %u at end of array, found %u",
 474          _allocation_region->hrm_index(), _allocated_regions.at(index)->hrm_index());
 475   HeapWord* base_address = _allocation_region->bottom();
 476   HeapWord* top = base_address;
 477 
 478   while (index >= 0) {
 479     HeapRegion* next = _allocated_regions.at(index);
 480     HeapWord* new_base = next->bottom();
 481     HeapWord* new_top = next->top();
 482     if (new_base != top) {
 483       ranges->append(MemRegion(base_address, pointer_delta(top, base_address)));
 484       base_address = new_base;

New version:

  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc/g1/g1Allocator.inline.hpp"
  27 #include "gc/g1/g1AllocRegion.inline.hpp"
  28 #include "gc/g1/g1EvacStats.inline.hpp"
  29 #include "gc/g1/g1CollectedHeap.inline.hpp"
  30 #include "gc/g1/g1Policy.hpp"
  31 #include "gc/g1/heapRegion.inline.hpp"
  32 #include "gc/g1/heapRegionSet.inline.hpp"
  33 #include "gc/g1/heapRegionType.hpp"
  34 #include "gc/shared/fill.hpp"
  35 #include "utilities/align.hpp"
  36 
  37 G1Allocator::G1Allocator(G1CollectedHeap* heap) :
  38   _g1h(heap),
  39   _survivor_is_full(false),
  40   _old_is_full(false),
  41   _mutator_alloc_region(),
  42   _survivor_gc_alloc_region(heap->alloc_buffer_stats(InCSetState::Young)),
  43   _old_gc_alloc_region(heap->alloc_buffer_stats(InCSetState::Old)),
  44   _retained_old_gc_alloc_region(NULL) {
  45 }
  46 
  47 void G1Allocator::init_mutator_alloc_region() {
  48   assert(_mutator_alloc_region.get() == NULL, "pre-condition");
  49   _mutator_alloc_region.init();
  50 }
  51 
  52 void G1Allocator::release_mutator_alloc_region() {
  53   _mutator_alloc_region.release();
  54   assert(_mutator_alloc_region.get() == NULL, "post-condition");


 390   HeapWord* old_top = _allocation_region->top();
 391   assert(_bottom >= _allocation_region->bottom(),
 392          "inconsistent allocation state: " PTR_FORMAT " < " PTR_FORMAT,
 393          p2i(_bottom), p2i(_allocation_region->bottom()));
 394   assert(_max <= _allocation_region->end(),
 395          "inconsistent allocation state: " PTR_FORMAT " > " PTR_FORMAT,
 396          p2i(_max), p2i(_allocation_region->end()));
 397   assert(_bottom <= old_top && old_top <= _max,
 398          "inconsistent allocation state: expected "
 399          PTR_FORMAT " <= " PTR_FORMAT " <= " PTR_FORMAT,
 400          p2i(_bottom), p2i(old_top), p2i(_max));
 401 
 402   // Allocate the next word_size words in the current allocation chunk.
 403   // If allocation would cross the _max boundary, insert a filler and begin
 404   // at the base of the next min_region_size'd chunk. Also advance to the next
 405   // chunk if we don't yet cross the boundary, but the remainder would be too
 406   // small to fill.
 407   HeapWord* new_top = old_top + word_size;
 408   size_t remainder = pointer_delta(_max, new_top);
 409   if ((new_top > _max) ||
 410       ((new_top < _max) && (remainder < Fill::min_size()))) {
 411     if (old_top != _max) {
 412       size_t fill_size = pointer_delta(_max, old_top);
 413       Fill::range(old_top, fill_size);
 414       _summary_bytes_used += fill_size * HeapWordSize;
 415     }
 416     _allocation_region->set_top(_max);
 417     old_top = _bottom = _max;
 418 
 419     // Check if we've just used up the last min_region_size'd chunk
 420     // in the current region, and if so, allocate a new one.
 421     if (_bottom != _allocation_region->end()) {
 422       _max = _bottom + HeapRegion::min_region_size_in_words();
 423     } else {
 424       if (!alloc_new_region()) {
 425         return NULL;
 426       }
 427       old_top = _allocation_region->bottom();
 428     }
 429   }
 430   _allocation_region->set_top(old_top + word_size);
 431   _summary_bytes_used += word_size * HeapWordSize;
 432 
 433   return old_top;
 434 }
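
The chunk-boundary arithmetic above can be checked in isolation. A minimal standalone sketch, using plain word indices instead of HeapWord* and an assumed 2-word minimum filler (hypothetical values, not the HotSpot types):

    #include <cassert>
    #include <cstddef>

    const size_t min_fill = 2;  // assumed minimum filler size, in words

    // Words of filler needed before allocating word_size words at old_top,
    // given the current chunk boundary max. Zero means the allocation fits
    // and leaves either no gap or a gap large enough to fill later.
    size_t filler_words(size_t old_top, size_t max, size_t word_size) {
      size_t new_top = old_top + word_size;
      if (new_top > max ||                                // would cross the boundary
          (new_top < max && max - new_top < min_fill)) {  // remainder too small to fill
        return max - old_top;                             // pad up to the boundary
      }
      return 0;
    }

    int main() {
      assert(filler_words(10, 16, 4) == 0);  // remainder 2 is still fillable
      assert(filler_words(10, 16, 6) == 0);  // lands exactly on the boundary
      assert(filler_words(10, 16, 5) == 6);  // remainder 1 < min_fill: pad 10..16
      assert(filler_words(10, 16, 8) == 6);  // 18 > 16: pad 10..16, retry in next chunk
      return 0;
    }

Note the equality case: when new_top == _max the chunk is consumed exactly and no filler is needed, which is why the condition uses both > and < rather than a single >=.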
 435 
 436 void G1ArchiveAllocator::complete_archive(GrowableArray<MemRegion>* ranges,
 437                                           size_t end_alignment_in_bytes) {
 438   assert((end_alignment_in_bytes >> LogHeapWordSize) < HeapRegion::min_region_size_in_words(),
 439          "alignment " SIZE_FORMAT " too large", end_alignment_in_bytes);
 440   assert(is_aligned(end_alignment_in_bytes, HeapWordSize),
 441          "alignment " SIZE_FORMAT " is not HeapWord (%u) aligned", end_alignment_in_bytes, HeapWordSize);
 442 
 443   // If we've allocated nothing, simply return.
 444   if (_allocation_region == NULL) {
 445     return;
 446   }
 447 
 448   // If an end alignment was requested, insert filler objects.
 449   if (end_alignment_in_bytes != 0) {
 450     HeapWord* currtop = _allocation_region->top();
 451     HeapWord* newtop = align_up(currtop, end_alignment_in_bytes);
 452     size_t fill_size = pointer_delta(newtop, currtop);
 453     if (fill_size != 0) {
 454       if (fill_size < Fill::min_size()) {
 455         // If the required fill is smaller than we can represent,
 456         // bump up to the next aligned address. We know we won't exceed the current
 457         // region boundary because the max supported alignment is smaller than the min
 458         // region size, and because the allocation code never leaves space smaller than
 459         // Fill::min_size() at the top of the current allocation region.
 460         newtop = align_up(currtop + Fill::min_size(), end_alignment_in_bytes);
 461         fill_size = pointer_delta(newtop, currtop);
 462       }
 463       HeapWord* fill = archive_mem_allocate(fill_size);
 464       Fill::range(fill, fill_size);
 465     }
 466   }
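
The bump in the undersized-fill case is likewise easy to verify with plain numbers. A sketch under assumed values (8-word alignment, 2-word minimum filler, word-indexed addresses rather than HeapWord*):

    #include <cassert>
    #include <cstddef>

    // Power-of-two align-up, as in utilities/align.hpp.
    size_t align_up(size_t x, size_t a) { return (x + a - 1) & ~(a - 1); }

    int main() {
      const size_t min_fill = 2, alignment = 8;

      size_t currtop = 15;
      size_t newtop = align_up(currtop, alignment);  // 16: leaves a 1-word gap
      size_t fill = newtop - currtop;                // 1 < min_fill: not representable
      if (fill != 0 && fill < min_fill) {
        // Skip past the smallest representable filler, then re-align.
        newtop = align_up(currtop + min_fill, alignment);
        fill = newtop - currtop;
      }
      assert(newtop == 24 && fill == 9);  // 9 >= min_fill: fillable
      return 0;
    }

Because the supported alignments are smaller than the minimum region size, the bumped newtop stays within the current region, as the comment above argues.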
 467 
 468   // Loop through the allocated regions, and create MemRegions summarizing
 469   // the allocated address range, combining contiguous ranges. Add the
 470   // MemRegions to the GrowableArray provided by the caller.
 471   int index = _allocated_regions.length() - 1;
 472   assert(_allocated_regions.at(index) == _allocation_region,
 473          "expected region %u at end of array, found %u",
 474          _allocation_region->hrm_index(), _allocated_regions.at(index)->hrm_index());
 475   HeapWord* base_address = _allocation_region->bottom();
 476   HeapWord* top = base_address;
 477 
 478   while (index >= 0) {
 479     HeapRegion* next = _allocated_regions.at(index);
 480     HeapWord* new_base = next->bottom();
 481     HeapWord* new_top = next->top();
 482     if (new_base != top) {
 483       ranges->append(MemRegion(base_address, pointer_delta(top, base_address)));
 484       base_address = new_base;
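
Taken together, the change routes all filler creation in this file through the new shared Fill helper: CollectedHeap::min_fill_size() becomes Fill::min_size(), and both CollectedHeap::fill_with_object() and fill_with_objects() become Fill::range(), with gc/shared/fill.hpp added to the includes. The interface relied on here can be inferred from the call sites; a declaration sketch (an assumption drawn from this diff, not the actual contents of fill.hpp):

    #include <cstddef>

    class HeapWord;  // opaque stand-in for HotSpot's HeapWord

    class Fill {
    public:
      // Minimum length, in heap words, of a range that filler objects can
      // cover (replaces CollectedHeap::min_fill_size() at these call sites).
      static size_t min_size();

      // Cover [start, start + words) with one or more filler objects
      // (replaces CollectedHeap::fill_with_object()/fill_with_objects()).
      static void range(HeapWord* start, size_t words);
    };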