
src/share/vm/gc/g1/g1Allocator.cpp

rev 8461 : imported patch webrev1.patch
rev 8462 : [mq]: version3

@@ -44,11 +44,11 @@
                                             OldGCAllocRegion* old,
                                             HeapRegion** retained_old) {
   HeapRegion* retained_region = *retained_old;
   *retained_old = NULL;
   assert(retained_region == NULL || !retained_region->is_archive(),
-         "Archive region should not be alloc region");
+         err_msg("Archive region should not be alloc region (index %u)", retained_region->hrm_index()));
 
   // We will discard the current GC alloc region if:
   // a) it's in the collection set (it can happen!),
   // b) it's already full (no point in using it),
   // c) it's empty (this means that it was emptied during

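Note: the context above lists the conditions under which the retained old GC alloc region is dropped rather than reused. As a minimal standalone sketch only (hypothetical RegionView type and field names, not the real HeapRegion API), the decision reduces to a single predicate:

  // Illustration only: the discard conditions from the comment above,
  // expressed over a simplified stand-in for HeapRegion.
  struct RegionView {
    bool in_collection_set;  // a) the region is in the collection set
    bool is_full;            // b) no space left to hand out
    bool is_empty;           // c) it was emptied, nothing worth retaining
  };

  static bool should_discard_retained_region(const RegionView& r) {
    return r.in_collection_set || r.is_full || r.is_empty;
  }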
@@ -170,25 +170,27 @@
       undo_wasted += buf->undo_waste();
     }
   }
 }
 
-
 G1ArchiveAllocator* G1ArchiveAllocator::create_allocator(G1CollectedHeap* g1h) {
   // Create the archive allocator, and also enable archive object checking
   // in mark-sweep, since we will be creating archive regions.
   G1ArchiveAllocator* result =  new G1ArchiveAllocator(g1h);
   G1MarkSweep::enable_archive_object_check();
   return result;
 }
 
-HeapRegion* G1ArchiveAllocator::alloc_new_region() {
-  // Allocate the highest available region in the reserved heap,
+bool G1ArchiveAllocator::alloc_new_region() {
+  // Allocate the highest free region in the reserved heap,
   // and add it to our list of allocated regions.  It is marked
   // archive and added to the old set.
-  HeapRegion* hr = _g1h->alloc_highest_available_region();
-  assert(hr->top() == hr->bottom(), "expected empty region");
+  HeapRegion* hr = _g1h->alloc_highest_free_region();
+  if (hr == NULL) {
+    return false;
+  }
+  assert(hr->is_empty(), err_msg("expected empty region (index %u)", hr->hrm_index()));
   hr->set_archive();
   _g1h->_old_set.add(hr);
   _g1h->_hr_printer.alloc(hr, G1HRPrinter::Archive);
   _allocated_regions.append(hr);
   _allocation_region = hr;
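Note: the reworked alloc_new_region() now reports failure instead of assuming a region is always available. A simplified sketch of the same pattern, with hypothetical names and a plain free map standing in for the G1 heap-region manager: claim the highest-indexed free slot, or return false when none exists so the caller can bail out.

  #include <vector>

  // Sketch of "allocate the highest free region": scan a hypothetical
  // free map from the top and claim the first free slot found.
  static bool alloc_highest_free(std::vector<bool>& is_free, int* claimed_index) {
    for (int i = (int)is_free.size() - 1; i >= 0; i--) {
      if (is_free[i]) {
        is_free[i] = false;     // claim it
        *claimed_index = i;
        return true;            // caller must check this, as archive_mem_allocate does
      }
    }
    return false;               // no free region; caller propagates NULL
  }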

@@ -197,20 +199,23 @@
   // min_region_size'd chunk of the allocated G1 region.
   _bottom = hr->bottom();
   _max = _bottom + HeapRegion::min_region_size_in_words();
 
   // Tell mark-sweep that objects in this region are not to be marked.
-  G1MarkSweep::mark_range_archive(_bottom, hr->end() - 1);
+  G1MarkSweep::mark_range_archive(MemRegion(_bottom, HeapRegion::GrainWords));
 
   // Since we've modified the old set, call update_sizes.
   _g1h->g1mm()->update_sizes();
-  return hr;
+  return true;
 }
 
 HeapWord* G1ArchiveAllocator::archive_mem_allocate(size_t word_size) {
+  assert(word_size != 0, "size must not be zero");
   if (_allocation_region == NULL) {
-    alloc_new_region();
+    if (!alloc_new_region()) {
+      return NULL;
+    }
   }
   HeapWord* old_top = _allocation_region->top();
   assert(_bottom >= _allocation_region->bottom(), 
          err_msg("inconsistent allocation state: " PTR_FORMAT " < " PTR_FORMAT,
                  p2i(_bottom), p2i(_allocation_region->bottom())));
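Note: the hunk above replaces the (start, last-word) pair previously passed to mark_range_archive() with a MemRegion built from a start address and a length in words. A small self-contained sketch of that representation, using a simplified stand-in for HotSpot's MemRegion with HeapWord modeled as uintptr_t:

  #include <cstddef>
  #include <cstdint>

  typedef uintptr_t HeapWord;   // simplified stand-in, not HotSpot's HeapWord

  // Simplified MemRegion: a start address plus a word count, so the
  // exclusive end is derived rather than passed as a separate pointer.
  struct SimpleMemRegion {
    HeapWord* _start;
    size_t    _word_size;
    SimpleMemRegion(HeapWord* start, size_t word_size)
      : _start(start), _word_size(word_size) {}
    HeapWord* start() const { return _start; }
    HeapWord* end()   const { return _start + _word_size; }  // one past the last word
  };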

@@ -221,20 +226,20 @@
          err_msg("inconsistent allocation state: expected "
                  PTR_FORMAT " <= " PTR_FORMAT " <= " PTR_FORMAT,
                  p2i(_bottom), p2i(old_top), p2i(_max)));
 
   // Allocate the next word_size words in the current allocation chunk.
-  // If allocation would cross the _max boundary, insert a fill and begin
+  // If allocation would cross the _max boundary, insert a filler and begin
   // at the base of the next min_region_size'd chunk. Also advance to the next
   // chunk if we don't yet cross the boundary, but the remainder would be too 
   // small to fill.
   HeapWord* new_top = old_top + word_size;
-  size_t remainder = (size_t)(_max - new_top);
+  size_t remainder = pointer_delta(_max, new_top);
   if ((new_top > _max) || 
       ((new_top < _max) && (remainder < CollectedHeap::min_fill_size()))) {
     if (old_top != _max) {
-      size_t fill_size = _max - old_top;
+      size_t fill_size = pointer_delta(_max, old_top);
       CollectedHeap::fill_with_object(old_top, fill_size);
       _summary_bytes_used += fill_size * HeapWordSize;
     }
     _allocation_region->set_top(_max);
     old_top = _bottom = _max;
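Note: the arithmetic above works in HeapWords: pointer_delta() yields the distance in words, and a filler is planted whenever an allocation would either cross _max or leave a tail too small for the minimum filler object. A compact standalone sketch of that decision, using plain word offsets instead of HeapWord pointers (min_fill and the offsets are illustrative parameters, not the HotSpot constants):

  #include <cstddef>

  // Returns true if an allocation of 'word_size' words at 'old_top' must
  // first skip to 'max' (filling [old_top, max) when that gap is nonempty).
  static bool must_skip_to_chunk_end(size_t old_top, size_t max,
                                     size_t word_size, size_t min_fill) {
    size_t new_top = old_top + word_size;
    if (new_top > max) {
      return true;                       // would cross the chunk boundary
    }
    if (new_top < max && (max - new_top) < min_fill) {
      return true;                       // tail too small to cover with a filler
    }
    return false;                        // fits, possibly ending exactly at max
  }

When this returns true and old_top != max, the gap of max - old_top words is filled with a dummy object, matching the fill_with_object() call in the hunk above.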

@@ -242,61 +247,77 @@
     // Check if we've just used up the last min_region_size'd chunk 
     // in the current region, and if so, allocate a new one.
     if (_bottom != _allocation_region->end()) {
       _max = _bottom + HeapRegion::min_region_size_in_words();
     } else {
-      alloc_new_region();
+      if (!alloc_new_region()) {
+        return NULL;
+      }
       old_top = _allocation_region->bottom();
     }
   }
   _allocation_region->set_top(old_top + word_size);
   _summary_bytes_used += word_size * HeapWordSize;
 
   return old_top;
 }
 
 void G1ArchiveAllocator::complete_archive(GrowableArray<MemRegion>* ranges,
-                                          uint end_alignment) {
-  assert((end_alignment >> LogHeapWordSize) < HeapRegion::min_region_size_in_words(),
-         "alignment too large");
+                                          size_t end_alignment_in_bytes) {
+  assert((end_alignment_in_bytes >> LogHeapWordSize) < HeapRegion::min_region_size_in_words(),
+         err_msg("alignment " SIZE_FORMAT " too large", end_alignment_in_bytes));
+  assert(is_size_aligned(end_alignment_in_bytes, HeapWordSize),
+         err_msg("alignment " SIZE_FORMAT " is not HeapWord (%u) aligned", end_alignment_in_bytes, HeapWordSize));
+
   // If we've allocated nothing, simply return.
   if (_allocation_region == NULL) {
     return;
   }
 
   // If an end alignment was requested, insert filler objects.
-  if (end_alignment != 0) {
+  if (end_alignment_in_bytes != 0) {
     HeapWord* currtop = _allocation_region->top();
-    HeapWord* newtop = (HeapWord*)round_to((intptr_t)currtop, end_alignment);
-    size_t fill_size = newtop - currtop;
+    HeapWord* newtop = (HeapWord*)align_pointer_up(currtop, end_alignment_in_bytes);
+    size_t fill_size = pointer_delta(newtop, currtop);
     if (fill_size != 0) {
+      if (fill_size < CollectedHeap::min_fill_size()) {
+        // If the required fill is smaller than we can represent,
+        // bump up to the next aligned address. We know we won't exceed the current
+        // region boundary because the max supported alignment is smaller than the min
+        // region size, and because the allocation code never leaves space smaller than
+        // the min_fill_size at the top of the current allocation region.
+        newtop = (HeapWord*)align_pointer_up(currtop + CollectedHeap::min_fill_size(), 
+                                             end_alignment_in_bytes);
+        fill_size = pointer_delta(newtop, currtop);
+      }
       HeapWord* fill = archive_mem_allocate(fill_size);
       CollectedHeap::fill_with_objects(fill, fill_size);
     }
   }
 
   // Loop through the allocated regions, and create MemRegions summarizing
   // the allocated address range, combining contiguous ranges.  Add the
-  // MemRegions to the growable array provided by the caller.
+  // MemRegions to the GrowableArray provided by the caller.
   int index = _allocated_regions.length() - 1;
-  assert(_allocated_regions.at(index) == _allocation_region, "expect current region at end of array");
+  assert(_allocated_regions.at(index) == _allocation_region,
+         err_msg("expected region %u at end of array, found %u",
+                 _allocation_region->hrm_index(), _allocated_regions.at(index)->hrm_index()));
   HeapWord* base_address = _allocation_region->bottom();
   HeapWord* top = base_address;
 
   while (index >= 0) {
-    HeapRegion* next = _allocated_regions.at(index--);
+    HeapRegion* next = _allocated_regions.at(index);
     HeapWord* new_base = next->bottom();
     HeapWord* new_top = next->top();
     if (new_base != top) {
-      ranges->append(MemRegion(base_address, top - base_address));
+      ranges->append(MemRegion(base_address, pointer_delta(top, base_address)));
       base_address = new_base;
     }
     top = new_top;
+    index = index - 1;
   }
 
-  ranges->append(MemRegion(base_address, top - base_address));
+  assert(top != base_address, err_msg("zero-sized range, address " PTR_FORMAT, p2i(base_address)));
+  ranges->append(MemRegion(base_address, pointer_delta(top, base_address)));
   _allocated_regions.clear();
   _allocation_region = NULL;
-
-  return;
-
 };
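Note: the end-alignment path in complete_archive() has one subtlety: if the gap up to the next aligned address is smaller than the minimum filler object, the code aligns up from currtop + min_fill_size instead, landing on the following boundary. A standalone sketch of that computation in word units (align_up and the parameter names are illustrative, mirroring what align_pointer_up does for pointers):

  #include <cstddef>

  static size_t align_up(size_t value, size_t alignment) {
    return ((value + alignment - 1) / alignment) * alignment;
  }

  // Words of filler needed to leave 'top' on an alignment boundary,
  // never returning a nonzero value smaller than 'min_fill'.
  static size_t end_alignment_fill(size_t top, size_t alignment_words, size_t min_fill) {
    size_t new_top = align_up(top, alignment_words);
    size_t fill = new_top - top;
    if (fill != 0 && fill < min_fill) {
      // Too small to cover with a filler object: bump to the next boundary.
      new_top = align_up(top + min_fill, alignment_words);
      fill = new_top - top;
    }
    return fill;
  }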
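Note: the final loop coalesces the per-region [bottom, top) ranges into as few MemRegions as possible: whenever one range starts exactly where the previous one ended, the two are folded together. A self-contained sketch of that coalescing step over plain word offsets (Range and the helper name are illustrative):

  #include <cstddef>
  #include <vector>

  struct Range { size_t start; size_t end; };   // [start, end), in words

  // Coalesce ranges already ordered by ascending start address,
  // folding a range into its predecessor when they touch.
  static std::vector<Range> coalesce(const std::vector<Range>& in) {
    std::vector<Range> out;
    for (const Range& r : in) {
      if (!out.empty() && out.back().end == r.start) {
        out.back().end = r.end;        // contiguous: extend the previous range
      } else {
        out.push_back(r);              // gap: start a new summary range
      }
    }
    return out;
  }

In the patch the same effect is achieved in one backward pass over _allocated_regions, appending a MemRegion each time a gap is found and once more at the end.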