
src/share/vm/gc/g1/g1Allocator.cpp
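
Summary of the change: the type-specific alignment helpers is_size_aligned() and align_ptr_up() are replaced by the unified is_aligned() and align_up(); the allocator logic is otherwise untouched. Two short standalone sketches follow the diff below.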

--- old/src/share/vm/gc/g1/g1Allocator.cpp
+++ new/src/share/vm/gc/g1/g1Allocator.cpp
@@ -408,61 +408,61 @@
     // in the current region, and if so, allocate a new one.
     if (_bottom != _allocation_region->end()) {
       _max = _bottom + HeapRegion::min_region_size_in_words();
     } else {
       if (!alloc_new_region()) {
         return NULL;
       }
       old_top = _allocation_region->bottom();
     }
   }
   _allocation_region->set_top(old_top + word_size);
   _summary_bytes_used += word_size * HeapWordSize;
 
   return old_top;
 }
 
 void G1ArchiveAllocator::complete_archive(GrowableArray<MemRegion>* ranges,
                                           size_t end_alignment_in_bytes) {
   assert((end_alignment_in_bytes >> LogHeapWordSize) < HeapRegion::min_region_size_in_words(),
          "alignment " SIZE_FORMAT " too large", end_alignment_in_bytes);
-  assert(is_size_aligned(end_alignment_in_bytes, HeapWordSize),
+  assert(is_aligned(end_alignment_in_bytes, HeapWordSize),
          "alignment " SIZE_FORMAT " is not HeapWord (%u) aligned", end_alignment_in_bytes, HeapWordSize);
 
   // If we've allocated nothing, simply return.
   if (_allocation_region == NULL) {
     return;
   }
 
   // If an end alignment was requested, insert filler objects.
   if (end_alignment_in_bytes != 0) {
     HeapWord* currtop = _allocation_region->top();
-    HeapWord* newtop = align_ptr_up(currtop, end_alignment_in_bytes);
+    HeapWord* newtop = align_up(currtop, end_alignment_in_bytes);
     size_t fill_size = pointer_delta(newtop, currtop);
     if (fill_size != 0) {
       if (fill_size < CollectedHeap::min_fill_size()) {
         // If the required fill is smaller than we can represent,
         // bump up to the next aligned address. We know we won't exceed the current
         // region boundary because the max supported alignment is smaller than the min
         // region size, and because the allocation code never leaves space smaller than
         // the min_fill_size at the top of the current allocation region.
-        newtop = align_ptr_up(currtop + CollectedHeap::min_fill_size(),
-                              end_alignment_in_bytes);
+        newtop = align_up(currtop + CollectedHeap::min_fill_size(),
+                          end_alignment_in_bytes);
         fill_size = pointer_delta(newtop, currtop);
       }
       HeapWord* fill = archive_mem_allocate(fill_size);
       CollectedHeap::fill_with_objects(fill, fill_size);
     }
   }
 
   // Loop through the allocated regions, and create MemRegions summarizing
   // the allocated address range, combining contiguous ranges. Add the
   // MemRegions to the GrowableArray provided by the caller.
   int index = _allocated_regions.length() - 1;
   assert(_allocated_regions.at(index) == _allocation_region,
          "expected region %u at end of array, found %u",
          _allocation_region->hrm_index(), _allocated_regions.at(index)->hrm_index());
   HeapWord* base_address = _allocation_region->bottom();
   HeapWord* top = base_address;
 
   while (index >= 0) {
     HeapRegion* next = _allocated_regions.at(index);
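
The rename is mechanical at all three call sites; the arithmetic is unchanged. For reference, here is a minimal standalone sketch of what templated align_up()/is_aligned() helpers compute. This is an illustrative assumption, not the HotSpot source (which, if this is the JDK alignment cleanup, lives in utilities/align.hpp); the point is that template deduction lets one name serve both the pointer call site (newtop) and the integral one (the HeapWordSize assert):

#include <cassert>
#include <cstddef>
#include <cstdint>

// Round x up to the next multiple of 'alignment' (a power of two).
template <typename T>
T align_up(T x, uintptr_t alignment) {
  assert((alignment & (alignment - 1)) == 0 && "power-of-two alignment");
  return (T)(((uintptr_t)x + alignment - 1) & ~(alignment - 1));
}

// True iff x sits on a multiple of 'alignment'.
template <typename T>
bool is_aligned(T x, uintptr_t alignment) {
  return ((uintptr_t)x & (alignment - 1)) == 0;
}

int main() {
  char buf[64];
  char* p = buf + 3;
  char* q = align_up(p, 16);               // pointer use, T deduced as char*
  assert(q >= p && is_aligned(q, 16));
  assert(align_up((size_t)25, 8) == 32);   // integral use, same name
  assert(is_aligned((size_t)24, 8));       // the form the patched assert uses
  (void)q;
  return 0;
}

With one helper covering both argument kinds, the diff can simply drop the _ptr_/_size_ infixes from the names while keeping every argument list as it was.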
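The complete_archive() fill logic is the subtler consumer of align_up(): when the gap up to the requested boundary is non-zero but smaller than the minimum filler object, the code deliberately overshoots by min_fill_size() and realigns. Below is a small self-contained sketch of that computation with illustrative stand-ins: kHeapWordSize = 8 and kMinFillWords = 2 are assumptions, as is the fill_words() helper itself, which is hypothetical and only mirrors lines 437-451 of the new file:

#include <cassert>
#include <cstddef>
#include <cstdint>

// Illustrative stand-ins, not the real HotSpot constants: assume 8-byte
// HeapWords and a 2-word minimum filler object.
static const size_t kHeapWordSize = 8;
static const size_t kMinFillWords = 2;

static uintptr_t align_up(uintptr_t v, size_t alignment) {
  return (v + alignment - 1) & ~(uintptr_t)(alignment - 1);
}

// Hypothetical mirror of the new-file logic at lines 437-451: how many words
// of filler are needed to push 'currtop' to the requested end alignment?
static size_t fill_words(uintptr_t currtop, size_t end_alignment_in_bytes) {
  uintptr_t newtop = align_up(currtop, end_alignment_in_bytes);
  size_t fill = (newtop - currtop) / kHeapWordSize;  // pointer_delta() counts words
  if (fill != 0 && fill < kMinFillWords) {
    // Gap too small for a filler object: overshoot by the minimum fill,
    // then realign, exactly as the patched code does with align_up().
    newtop = align_up(currtop + kMinFillWords * kHeapWordSize, end_alignment_in_bytes);
    fill = (newtop - currtop) / kHeapWordSize;
  }
  return fill;
}

int main() {
  // One word below a 4 KB boundary: too small to fill, so we skip past the
  // boundary and fill all the way to the next one (1 + 512 words).
  assert(fill_words(0x1000 - 8, 0x1000) == 1 + 0x1000 / kHeapWordSize);
  // Four words below the boundary: a plain 4-word filler suffices.
  assert(fill_words(0x1000 - 32, 0x1000) == 4);
  // Already aligned: no filler at all.
  assert(fill_words(0x2000, 0x1000) == 0);
  return 0;
}

The in-code comment explains why the overshoot is safe: the maximum supported alignment is smaller than the minimum region size, and the allocation code never leaves a gap smaller than min_fill_size at the top of a region.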