src/share/vm/gc/g1/g1Allocator.cpp

@@ -27,12 +27,15 @@
 #include "gc/g1/g1AllocRegion.inline.hpp"
 #include "gc/g1/g1EvacStats.inline.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1CollectorPolicy.hpp"
 #include "gc/g1/g1MarkSweep.hpp"
+#include "gc/g1/vm_operations_g1.hpp"
 #include "gc/g1/heapRegion.inline.hpp"
 #include "gc/g1/heapRegionSet.inline.hpp"
+#include "gc/shared/gcLocker.hpp"
+#include "runtime/vmThread.hpp"
 
 G1DefaultAllocator::G1DefaultAllocator(G1CollectedHeap* heap) :
   G1Allocator(heap),
   _survivor_is_full(false),
   _old_is_full(false),

@@ -156,10 +159,582 @@
   } else {
     return MIN2(MAX2(hr->free(), (size_t) MinTLABSize), max_tlab);
   }
 }
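
The clamp above bounds the suggested TLAB size by the current region's free space, never dropping below MinTLABSize nor exceeding the heap-wide maximum. A standalone sketch of the same clamp follows; the constants are made-up stand-ins for illustration, not the real MinTLABSize or max_tlab values:

#include <algorithm>
#include <cstddef>
#include <cstdio>

// Illustrative stand-ins only; the real bounds come from MinTLABSize and the heap.
static const std::size_t kMinTlab = 64;        // assumed lower bound
static const std::size_t kMaxTlab = 64 * 1024; // assumed upper bound

static std::size_t suggested_tlab_size(std::size_t region_free) {
  // Same shape as MIN2(MAX2(hr->free(), MinTLABSize), max_tlab).
  return std::min(std::max(region_free, kMinTlab), kMaxTlab);
}

int main() {
  std::printf("%zu\n", suggested_tlab_size(16));       // clamped up to 64
  std::printf("%zu\n", suggested_tlab_size(4096));     // passes through unchanged
  std::printf("%zu\n", suggested_tlab_size(1 << 20));  // clamped down to 65536
  return 0;
}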
 
+inline bool G1Allocator::is_humongous(size_t word_size) {
+  return G1CollectedHeap::is_humongous(word_size);
+}
+
+HeapWord* G1Allocator::attempt_allocation_slow(size_t word_size,
+                                               AllocationContext_t context,
+                                               uint* gc_count_before_ret,
+                                               uint* gclocker_retry_count_ret) {
+  // Make sure you read the note in attempt_allocation_humongous().
+
+  assert_heap_not_locked_and_not_at_safepoint();
+  assert(!is_humongous(word_size), "attempt_allocation_slow() should not "
+         "be called for humongous allocation requests");
+
+  // We should only get here after the first-level allocation attempt
+  // (attempt_allocation()) failed to allocate.
+
+  // We will loop until a) we manage to successfully perform the
+  // allocation or b) we successfully schedule a collection which
+  // fails to perform the allocation. b) is the only case when we'll
+  // return NULL.
+  HeapWord* result = NULL;
+  for (int try_count = 1; /* we'll return */; try_count += 1) {
+    bool should_try_gc;
+    uint gc_count_before;
+
+    {
+      MutexLockerEx x(Heap_lock);
+      result = attempt_allocation_locked(word_size, context);
+      if (result != NULL) {
+        return result;
+      }
+
+      if (GCLocker::is_active_and_needs_gc()) {
+        if (_g1h->g1_policy()->can_expand_young_list()) {
+          // No need for an ergo verbose message here,
+          // can_expand_young_list() does this when it returns true.
+          result = attempt_allocation_force(word_size, context);
+          if (result != NULL) {
+            return result;
+          }
+        }
+        should_try_gc = false;
+      } else {
+        // The GCLocker may not be active but the GCLocker initiated
+        // GC may not yet have been performed (GCLocker::needs_gc()
+        // returns true). In this case we do not try this GC and
+        // wait until the GCLocker initiated GC is performed, and
+        // then retry the allocation.
+        if (GCLocker::needs_gc()) {
+          should_try_gc = false;
+        } else {
+          // Read the GC count while still holding the Heap_lock.
+          gc_count_before = _g1h->total_collections();
+          should_try_gc = true;
+        }
+      }
+    }
+
+    if (should_try_gc) {
+      bool succeeded;
+      result = _g1h->do_collection_pause(word_size, gc_count_before, &succeeded,
+                                         GCCause::_g1_inc_collection_pause);
+      if (result != NULL) {
+        assert(succeeded, "only way to get back a non-NULL result");
+        return result;
+      }
+
+      if (succeeded) {
+        // If we get here we successfully scheduled a collection which
+        // failed to allocate. No point in trying to allocate
+        // further. We'll just return NULL.
+        MutexLockerEx x(Heap_lock);
+        *gc_count_before_ret = _g1h->total_collections();
+        return NULL;
+      }
+    } else {
+      if (*gclocker_retry_count_ret > GCLockerRetryAllocationCount) {
+        MutexLockerEx x(Heap_lock);
+        *gc_count_before_ret = _g1h->total_collections();
+        return NULL;
+      }
+      // The GCLocker is either active or the GCLocker initiated
+      // GC has not yet been performed. Stall until it is and
+      // then retry the allocation.
+      GCLocker::stall_until_clear();
+      (*gclocker_retry_count_ret) += 1;
+    }
+
+    // We can reach here if we were unsuccessful in scheduling a
+    // collection (because another thread beat us to it) or if we were
+    // stalled due to the GC locker. In either case we should retry the
+    // allocation attempt in case another thread successfully
+    // performed a collection and reclaimed enough space. We do the
+    // first attempt (without holding the Heap_lock) here and the
+    // follow-on attempt will be at the start of the next loop
+    // iteration (after taking the Heap_lock).
+    result = attempt_allocation(word_size, context);
+    if (result != NULL) {
+      return result;
+    }
+
+    // Give a warning if we seem to be looping forever.
+    if ((QueuedAllocationWarningCount > 0) &&
+        (try_count % QueuedAllocationWarningCount == 0)) {
+      warning("G1CollectedHeap::attempt_allocation_slow() "
+              "retries %d times", try_count);
+    }
+  }
+
+  ShouldNotReachHere();
+  return NULL;
+}
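
The control flow above (attempt under the Heap_lock, then either schedule a pause or stall on the GC locker, then retry) can be illustrated with a small self-contained toy. Everything here is invented for the sketch: the globals stand in for heap and GCLocker state, and try_schedule_gc models what do_collection_pause() achieves, not its real API:

#include <cstddef>
#include <cstdio>

static std::size_t g_free_words     = 100;   // pretend free space, in words
static bool        g_gc_locker_busy = false; // pretend GCLocker state

static void* try_allocate(std::size_t words) {
  if (words <= g_free_words) {
    g_free_words -= words;
    static char backing[1024];
    return backing;                          // dummy non-null "address"
  }
  return nullptr;
}

static bool try_schedule_gc(std::size_t words, void** result) {
  g_free_words = 100;                        // pretend the pause reclaimed space
  *result = try_allocate(words);             // allocation retried during the pause
  return true;                               // the GC did run
}

static void* allocate_slow(std::size_t words, unsigned max_locker_retries) {
  unsigned locker_retries = 0;
  for (;;) {
    if (void* p = try_allocate(words)) {
      return p;                              // a) allocation succeeded
    }
    if (!g_gc_locker_busy) {
      void* p = nullptr;
      if (try_schedule_gc(words, &p)) {
        return p;                            // b) a GC ran; result may still be null
      }
      // Another thread won the race to schedule the GC; loop and retry.
    } else {
      if (locker_retries++ > max_locker_retries) {
        return nullptr;                      // give up after repeated stalls
      }
      // The real code calls GCLocker::stall_until_clear(); the toy just clears the flag.
      g_gc_locker_busy = false;
    }
  }
}

int main() {
  std::printf("first request:  %s\n", allocate_slow(60, 2)  ? "allocated" : "failed");
  std::printf("second request: %s\n", allocate_slow(150, 2) ? "allocated" : "failed");
  return 0;
}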
+
+HeapWord* G1Allocator::humongous_obj_allocate_initialize_regions(uint first,
+                                                                 uint num_regions,
+                                                                 size_t word_size,
+                                                                 AllocationContext_t context) {
+  assert(first != G1_NO_HRM_INDEX, "pre-condition");
+  assert(is_humongous(word_size), "word_size should be humongous");
+  assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");
+
+  // Index of last region in the series.
+  uint last = first + num_regions - 1;
+
+  // We need to initialize the region(s) we just discovered. This is
+  // a bit tricky given that it can happen concurrently with
+  // refinement threads refining cards on these regions and
+  // potentially wanting to refine the BOT as they are scanning
+  // those cards (this can happen shortly after a cleanup; see CR
+  // 6991377). So we have to set up the region(s) carefully and in
+  // a specific order.
+
+  // The word size sum of all the regions we will allocate.
+  size_t word_size_sum = (size_t) num_regions * HeapRegion::GrainWords;
+  assert(word_size <= word_size_sum, "sanity");
+
+  // This will be the "starts humongous" region.
+  HeapRegion* first_hr = _g1h->region_at(first);
+  // The header of the new object will be placed at the bottom of
+  // the first region.
+  HeapWord* new_obj = first_hr->bottom();
+  // This will be the new top of the new object.
+  HeapWord* obj_top = new_obj + word_size;
+
+  // First, we need to zero the header of the space that we will be
+  // allocating. When we update top further down, some refinement
+  // threads might try to scan the region. By zeroing the header we
+  // ensure that any thread that will try to scan the region will
+  // come across the zero klass word and bail out.
+  //
+  // NOTE: It would not have been correct to have used
+  // CollectedHeap::fill_with_object() and make the space look like
+  // an int array. The thread that is doing the allocation will
+  // later update the object header to a potentially different array
+  // type and, for a very short period of time, the klass and length
+  // fields will be inconsistent. This could cause a refinement
+  // thread to calculate the object size incorrectly.
+  Copy::fill_to_words(new_obj, oopDesc::header_size(), 0);
+
+  // How many words we use for filler objects.
+  size_t word_fill_size = word_size_sum - word_size;
+
+  // How many words of memory we "waste" because they cannot hold a filler object.
+  size_t words_not_fillable = 0;
+
+  if (word_fill_size >= _g1h->min_fill_size()) {
+    _g1h->fill_with_objects(obj_top, word_fill_size);
+  } else if (word_fill_size > 0) {
+    // We have space to fill, but we cannot fit an object there.
+    words_not_fillable = word_fill_size;
+    word_fill_size = 0;
+  }
+
+  // We will set up the first region as "starts humongous". This
+  // will also update the BOT covering all the regions to reflect
+  // that there is a single object that starts at the bottom of the
+  // first region.
+  first_hr->set_starts_humongous(obj_top, word_fill_size);
+  first_hr->set_allocation_context(context);
+  // Then, if there are any, we will set up the "continues
+  // humongous" regions.
+  HeapRegion* hr = NULL;
+  for (uint i = first + 1; i <= last; ++i) {
+    hr = _g1h->region_at(i);
+    hr->set_continues_humongous(first_hr);
+    hr->set_allocation_context(context);
+  }
+
+  // Up to this point no concurrent thread would have been able to
+  // do any scanning on any region in this series. All the top
+  // fields still point to bottom, so the intersection between
+  // [bottom,top] and [card_start,card_end] will be empty. Before we
+  // update the top fields, we'll do a storestore to make sure that
+  // no thread sees the update to top before the zeroing of the
+  // object header and the BOT initialization.
+  OrderAccess::storestore();
+
+  // Now, we will update the top fields of the "continues humongous"
+  // regions except the last one.
+  for (uint i = first; i < last; ++i) {
+    hr = _g1h->region_at(i);
+    hr->set_top(hr->end());
+  }
+
+  hr = _g1h->region_at(last);
+  // If we cannot fit a filler object, we must set top to the end
+  // of the humongous object, otherwise we cannot iterate the heap
+  // and the BOT will not be complete.
+  hr->set_top(hr->end() - words_not_fillable);
+
+  assert(hr->bottom() < obj_top && obj_top <= hr->end(),
+         "obj_top should be in last region");
+
+  _g1h->verifier()->check_bitmaps("Humongous Region Allocation", first_hr);
+
+  assert(words_not_fillable == 0 ||
+         first_hr->bottom() + word_size_sum - words_not_fillable == hr->top(),
+         "Miscalculation in humongous allocation");
+
+  _g1h->increase_used((word_size_sum - words_not_fillable) * HeapWordSize);
+
+  for (uint i = first; i <= last; ++i) {
+    hr = _g1h->region_at(i);
+    _g1h->_humongous_set.add(hr);
+    _g1h->hr_printer()->alloc(hr);
+  }
+
+  return new_obj;
+}
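
The ordering the comments insist on (zero the header and finish the BOT before any update to top becomes visible) is the usual publish-with-a-store-barrier idiom. A minimal standalone C++ sketch of that idiom, using a release fence where HotSpot uses OrderAccess::storestore(); the ToyRegion type is invented for illustration:

#include <atomic>
#include <cstddef>
#include <cstring>

// Invented toy type: readers scan [data, top) concurrently with the writer.
struct ToyRegion {
  long               data[64];  // stands in for the region's memory
  std::atomic<long*> top;       // readers load top to know how far to scan
};

static void publish(ToyRegion& r, std::size_t used_words) {
  // 1. Make the not-yet-initialized space unscannable. HotSpot zeroes the
  //    klass word so a refinement thread that races in bails out.
  std::memset(r.data, 0, sizeof(r.data));

  // 2. Barrier: none of the stores above may be reordered after the store
  //    to top below (the analogue of OrderAccess::storestore()).
  std::atomic_thread_fence(std::memory_order_release);

  // 3. Only now expose the new top. A reader that observes the new top
  //    (with an acquire load, or via a data dependency) also observes the
  //    zeroed header written in step 1.
  r.top.store(r.data + used_words, std::memory_order_relaxed);
}

int main() {
  static ToyRegion region;   // static: zero-initialized
  publish(region, 16);
  return 0;
}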
+
+size_t G1Allocator::humongous_obj_size_in_regions(size_t word_size) {
+  assert(is_humongous(word_size), "Object of size " SIZE_FORMAT " must be humongous here", word_size);
+  return align_size_up_(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords;
+}
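
humongous_obj_size_in_regions() is just a ceiling division of the object size by the region size in words. A worked example under an assumed 1 MB region size (HeapRegion::GrainWords would then be 131072 with 8-byte words; real G1 region sizes vary):

#include <cstddef>
#include <cstdio>

// Assumed region geometry for the example: 1 MB regions, 8-byte heap words.
static const std::size_t kGrainWords = (1024 * 1024) / 8;  // 131072 words

static std::size_t humongous_size_in_regions(std::size_t word_size) {
  // align_size_up_(word_size, kGrainWords) / kGrainWords is a ceiling division.
  return (word_size + kGrainWords - 1) / kGrainWords;
}

int main() {
  std::printf("%zu\n", humongous_size_in_regions(131072));  // exactly 1 region
  std::printf("%zu\n", humongous_size_in_regions(200000));  // ~1.6 MB -> 2 regions
  return 0;
}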
+
+HeapWord* G1Allocator::attempt_allocation_at_safepoint(size_t word_size,
+                                                       AllocationContext_t context,
+                                                       bool expect_null_mutator_alloc_region) {
+  assert_at_safepoint(true /* should_be_vm_thread */);
+  assert(!has_mutator_alloc_region(context) || !expect_null_mutator_alloc_region,
+         "the current alloc region was unexpectedly found to be non-NULL");
+
+  if (!is_humongous(word_size)) {
+    return attempt_allocation_locked(word_size, context);
+  } else {
+    HeapWord* result = humongous_obj_allocate(word_size, context);
+    if (result != NULL && _g1h->g1_policy()->need_to_start_conc_mark("STW humongous allocation")) {
+      _g1h->collector_state()->set_initiate_conc_mark_if_possible(true);
+    }
+    return result;
+  }
+
+  ShouldNotReachHere();
+}
+
+// If the object could fit into free regions without expansion, try that.
+// Otherwise, if the heap can expand, do so.
+// Otherwise, if using ex regions might help, try with ex given back.
+HeapWord* G1Allocator::humongous_obj_allocate(size_t word_size, AllocationContext_t context) {
+  assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
+
+  _g1h->verifier()->verify_region_sets_optional();
+
+  uint first = G1_NO_HRM_INDEX;
+  uint obj_regions = (uint) humongous_obj_size_in_regions(word_size);
+
+  if (obj_regions == 1) {
+    // Only one region to allocate, try to use a fast path by directly allocating
+    // from the free lists. Do not try to expand here, we will potentially do that
+    // later.
+    HeapRegion* hr = _g1h->new_region(word_size, true /* is_old */, false /* do_expand */);
+    if (hr != NULL) {
+      first = hr->hrm_index();
+    }
+  } else {
+    // We can't allocate humongous regions spanning more than one region while
+    // cleanupComplete() is running, since some of the regions we find to be
+    // empty might not yet be added to the free list. It is not straightforward
+    // to know in which list they are on so that we can remove them. We only
+    // need to do this if we need to allocate more than one region to satisfy the
+    // current humongous allocation request. If we are only allocating one region
+    // we use the one-region region allocation code (see above), that already
+    // potentially waits for regions from the secondary free list.
+    _g1h->wait_while_free_regions_coming();
+    _g1h->append_secondary_free_list_if_not_empty_with_lock();
+
+    // Policy: Try only empty regions (i.e. already committed) first. Maybe we
+    // are lucky enough to find some.
+    first = _g1h->_hrm.find_contiguous_only_empty(obj_regions);
+    if (first != G1_NO_HRM_INDEX) {
+      _g1h->_hrm.allocate_free_regions_starting_at(first, obj_regions);
+    }
+  }
+
+  if (first == G1_NO_HRM_INDEX) {
+    // Policy: We could not find enough regions for the humongous object in the
+    // free list. Look through the heap to find a mix of free and uncommitted regions.
+    // If so, try expansion.
+    first = _g1h->_hrm.find_contiguous_empty_or_unavailable(obj_regions);
+    if (first != G1_NO_HRM_INDEX) {
+      // We found something. Make sure these regions are committed, i.e. expand
+      // the heap. Alternatively we could do a defragmentation GC.
+      log_debug(gc, ergo, heap)("Attempt heap expansion (humongous allocation request failed). Allocation request: " SIZE_FORMAT "B",
+                                    word_size * HeapWordSize);
+
+      _g1h->_hrm.expand_at(first, obj_regions);
+      _g1h->g1_policy()->record_new_heap_size(_g1h->num_regions());
+
+#ifdef ASSERT
+      for (uint i = first; i < first + obj_regions; ++i) {
+        HeapRegion* hr = _g1h->region_at(i);
+        assert(hr->is_free(), "sanity");
+        assert(hr->is_empty(), "sanity");
+        assert(_g1h->is_on_master_free_list(hr), "sanity");
+      }
+#endif
+      _g1h->_hrm.allocate_free_regions_starting_at(first, obj_regions);
+    } else {
+      // Policy: Potentially trigger a defragmentation GC.
+    }
+  }
+
+  HeapWord* result = NULL;
+  if (first != G1_NO_HRM_INDEX) {
+    result = humongous_obj_allocate_initialize_regions(first, obj_regions,
+                                                       word_size, context);
+    assert(result != NULL, "it should always return a valid result");
+
+    // A successful humongous object allocation changes the used space
+    // information of the old generation so we need to recalculate the
+    // sizes and update the jstat counters here.
+    _g1h->g1mm()->update_sizes();
+  }
+
+  _g1h->verifier()->verify_region_sets_optional();
+
+  return result;
+}
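
The policy in humongous_obj_allocate() has three tiers: prefer a run of already-committed free regions, fall back to a run that may include uncommitted regions and expand the heap to commit them, and otherwise give up so the caller can consider a collection. A self-contained toy of those tiers; the 16-region map and its states are invented for the sketch (the real code walks the HeapRegionManager):

#include <cstddef>
#include <cstdio>

enum RegionState { kUncommitted, kFree, kUsed };
static RegionState g_regions[16] = {
  kUsed, kFree, kFree, kUsed, kFree, kUncommitted, kUncommitted, kUncommitted,
  kUsed, kFree, kFree, kFree, kUsed, kFree, kUncommitted, kUncommitted
};

// Find a run of n consecutive regions whose states satisfy ok(); -1 if none.
template <typename Pred>
static int find_run(std::size_t n, Pred ok) {
  std::size_t run = 0;
  for (std::size_t i = 0; i < 16; ++i) {
    run = ok(g_regions[i]) ? run + 1 : 0;
    if (run == n) return static_cast<int>(i - n + 1);
  }
  return -1;
}

static int reserve_humongous_run(std::size_t n) {
  // Tier 1: only already-committed free regions (no expansion needed).
  int first = find_run(n, [](RegionState s) { return s == kFree; });
  if (first < 0) {
    // Tier 2: also accept uncommitted regions, then "expand" (commit) the run.
    first = find_run(n, [](RegionState s) { return s != kUsed; });
    if (first < 0) return -1;  // Tier 3: nothing found; caller may trigger a GC.
  }
  for (std::size_t i = 0; i < n; ++i) g_regions[first + i] = kUsed;
  return first;
}

int main() {
  std::printf("3 regions -> start %d (free list only)\n", reserve_humongous_run(3));
  std::printf("4 regions -> start %d (expansion needed)\n", reserve_humongous_run(4));
  return 0;
}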
+
+HeapWord* G1Allocator::attempt_allocation_humongous(size_t word_size,
+                                                    uint* gc_count_before_ret,
+                                                    uint* gclocker_retry_count_ret) {
+  // The structure of this method has a lot of similarities to
+  // attempt_allocation_slow(). The reason these two were not merged
+  // into a single one is that such a method would require several "if
+  // allocation is not humongous do this, otherwise do that"
+  // conditional paths which would obscure its flow. In fact, an early
+  // version of this code did use a unified method which was harder to
+  // follow and, as a result, it had subtle bugs that were hard to
+  // track down. So keeping these two methods separate allows each to
+  // be more readable. It will be good to keep these two in sync as
+  // much as possible.
+
+  assert_heap_not_locked_and_not_at_safepoint();
+  assert(is_humongous(word_size), "attempt_allocation_humongous() "
+         "should only be called for humongous allocations");
+
+  // Humongous objects can exhaust the heap quickly, so we should check if we
+  // need to start a marking cycle at each humongous object allocation. We do
+  // the check before we do the actual allocation. The reason for doing it
+  // before the allocation is that we avoid having to keep track of the newly
+  // allocated memory while we do a GC.
+  if (_g1h->g1_policy()->need_to_start_conc_mark("concurrent humongous allocation",
+                                           word_size)) {
+    _g1h->collect(GCCause::_g1_humongous_allocation);
+  }
+
+  // We will loop until a) we manage to successfully perform the
+  // allocation or b) we successfully schedule a collection which
+  // fails to perform the allocation. b) is the only case when we'll
+  // return NULL.
+  HeapWord* result = NULL;
+  for (int try_count = 1; /* we'll return */; try_count += 1) {
+    bool should_try_gc;
+    uint gc_count_before;
+
+    {
+      MutexLockerEx x(Heap_lock);
+
+      // Given that humongous objects are not allocated in young
+      // regions, we'll first try to do the allocation without doing a
+      // collection hoping that there's enough space in the heap.
+      result = humongous_obj_allocate(word_size, AllocationContext::current());
+      if (result != NULL) {
+        size_t size_in_regions = humongous_obj_size_in_regions(word_size);
+        _g1h->g1_policy()->add_bytes_allocated_in_old_since_last_gc(size_in_regions * HeapRegion::GrainBytes);
+        return result;
+      }
+
+      if (GCLocker::is_active_and_needs_gc()) {
+        should_try_gc = false;
+      } else {
+        // The GCLocker may not be active but the GCLocker initiated
+        // GC may not yet have been performed (GCLocker::needs_gc()
+        // returns true). In this case we do not try this GC and
+        // wait until the GCLocker initiated GC is performed, and
+        // then retry the allocation.
+        if (GCLocker::needs_gc()) {
+          should_try_gc = false;
+        } else {
+          // Read the GC count while still holding the Heap_lock.
+          gc_count_before = _g1h->total_collections();
+          should_try_gc = true;
+        }
+      }
+    }
+
+    if (should_try_gc) {
+      // If we failed to allocate the humongous object, we should try to
+      // do a collection pause (if we're allowed) in case it reclaims
+      // enough space for the allocation to succeed after the pause.
+
+      bool succeeded;
+      result = _g1h->do_collection_pause(word_size, gc_count_before, &succeeded,
+                                   GCCause::_g1_humongous_allocation);
+      if (result != NULL) {
+        assert(succeeded, "only way to get back a non-NULL result");
+        return result;
+      }
+
+      if (succeeded) {
+        // If we get here we successfully scheduled a collection which
+        // failed to allocate. No point in trying to allocate
+        // further. We'll just return NULL.
+        MutexLockerEx x(Heap_lock);
+        *gc_count_before_ret = _g1h->total_collections();
+        return NULL;
+      }
+    } else {
+      if (*gclocker_retry_count_ret > GCLockerRetryAllocationCount) {
+        MutexLockerEx x(Heap_lock);
+        *gc_count_before_ret = _g1h->total_collections();
+        return NULL;
+      }
+      // The GCLocker is either active or the GCLocker initiated
+      // GC has not yet been performed. Stall until it is and
+      // then retry the allocation.
+      GCLocker::stall_until_clear();
+      (*gclocker_retry_count_ret) += 1;
+    }
+
+    // We can reach here if we were unsuccessful in scheduling a
+    // collection (because another thread beat us to it) or if we were
+    // stalled due to the GC locker. In either case we should retry the
+    // allocation attempt in case another thread successfully
+    // performed a collection and reclaimed enough space. Give a
+    // warning if we seem to be looping forever.
+
+    if ((QueuedAllocationWarningCount > 0) &&
+        (try_count % QueuedAllocationWarningCount == 0)) {
+      warning("G1CollectedHeap::attempt_allocation_humongous() "
+              "retries %d times", try_count);
+    }
+  }
+
+  ShouldNotReachHere();
+  return NULL;
+}
+
+HeapWord* G1Allocator::mem_allocate(size_t word_size,
+                                    bool*  gc_overhead_limit_was_exceeded) {
+  assert_heap_not_locked_and_not_at_safepoint();
+
+  // Loop until the allocation is satisfied, or unsatisfied after GC.
+  for (uint try_count = 1, gclocker_retry_count = 0; /* we'll return */; try_count += 1) {
+    uint gc_count_before;
+
+    HeapWord* result = NULL;
+    if (!is_humongous(word_size)) {
+      result = attempt_allocation(word_size, &gc_count_before, &gclocker_retry_count);
+    } else {
+      result = attempt_allocation_humongous(word_size, &gc_count_before, &gclocker_retry_count);
+    }
+    if (result != NULL) {
+      return result;
+    }
+
+    // Create the garbage collection operation...
+    VM_G1CollectForAllocation op(gc_count_before, word_size);
+    op.set_allocation_context(AllocationContext::current());
+
+    // ...and get the VM thread to execute it.
+    VMThread::execute(&op);
+
+    if (op.prologue_succeeded() && op.pause_succeeded()) {
+      // If the operation was successful we'll return the result even
+      // if it is NULL. If the allocation attempt failed immediately
+      // after a Full GC, it's unlikely we'll be able to allocate now.
+      HeapWord* result = op.result();
+      if (result != NULL && !is_humongous(word_size)) {
+        // Allocations that take place on VM operations do not do any
+        // card dirtying and we have to do it here. We only have to do
+        // this for non-humongous allocations, though.
+        _g1h->dirty_young_block(result, word_size);
+      }
+      return result;
+    } else {
+      if (gclocker_retry_count > GCLockerRetryAllocationCount) {
+        return NULL;
+      }
+      assert(op.result() == NULL,
+             "the result should be NULL if the VM op did not succeed");
+    }
+
+    // Give a warning if we seem to be looping forever.
+    if ((QueuedAllocationWarningCount > 0) &&
+        (try_count % QueuedAllocationWarningCount == 0)) {
+      warning("G1CollectedHeap::mem_allocate retries %d times", try_count);
+    }
+  }
+
+  ShouldNotReachHere();
+  return NULL;
+}
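
VMThread::execute(&op) hands the collection request to the VM thread and blocks the allocating thread until the safepoint operation has run. Outside HotSpot, the same hand-off-and-wait shape can be sketched with a packaged task; this is only an analogy, not the VM_Operation machinery:

#include <cstdio>
#include <future>
#include <thread>

int main() {
  // The "operation": stands in for VM_G1CollectForAllocation's work, which
  // runs a pause and retries the allocation at the safepoint.
  std::packaged_task<const char*()> op([] {
    return "result of the pause (may be NULL in the real code)";
  });
  std::future<const char*> result = op.get_future();

  // Hand the operation to another thread (the stand-in for the VM thread)...
  std::thread vm_thread(std::move(op));

  // ...and block here, like the mutator does inside VMThread::execute().
  std::printf("%s\n", result.get());
  vm_thread.join();
  return 0;
}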
+
+#ifndef PRODUCT
+void G1Allocator::allocate_dummy_regions(size_t word_size) {
+  // The requested word_size must be such that the region we'll allocate will be humongous.
+  guarantee(is_humongous(word_size), "sanity");
+
+  for (uintx i = 0; i < G1DummyRegionsPerGC; ++i) {
+    // Let's use the existing mechanism for the allocation
+    HeapWord* dummy_obj = humongous_obj_allocate(word_size,
+                                                 AllocationContext::system());
+    if (dummy_obj != NULL) {
+      MemRegion mr(dummy_obj, word_size);
+      CollectedHeap::fill_with_object(mr);
+    } else {
+      // If we can't allocate once, we probably cannot allocate
+      // again. Let's get out of the loop.
+      break;
+    }
+  }
+}
+#endif // !PRODUCT
+
+inline HeapWord* G1Allocator::attempt_allocation(size_t word_size,
+                                                 uint* gc_count_before_ret,
+                                                 uint* gclocker_retry_count_ret) {
+  assert_heap_not_locked_and_not_at_safepoint();
+  assert(!is_humongous(word_size), "attempt_allocation() should not "
+         "be called for humongous allocation requests");
+
+  AllocationContext_t context = AllocationContext::current();
+  HeapWord* result = attempt_allocation(word_size, context);
+
+  if (result == NULL) {
+    result = attempt_allocation_slow(word_size,
+                                     context,
+                                     gc_count_before_ret,
+                                     gclocker_retry_count_ret);
+  }
+  assert_heap_not_locked();
+  if (result != NULL) {
+    _g1h->dirty_young_block(result, word_size);
+  }
+  return result;
+}
+
+HeapWord* G1Allocator::allocate_new_tlab(size_t word_size) {
+  assert_heap_not_locked_and_not_at_safepoint();
+  assert(!is_humongous(word_size), "we do not allow humongous TLABs");
+
+  uint dummy_gc_count_before;
+  uint dummy_gclocker_retry_count = 0;
+  return attempt_allocation(word_size, &dummy_gc_count_before, &dummy_gclocker_retry_count);
+}
+
 HeapWord* G1Allocator::par_allocate_during_gc(InCSetState dest,
                                               size_t word_size,
                                               AllocationContext_t context) {
   size_t temp = 0;
   HeapWord* result = par_allocate_during_gc(dest, word_size, word_size, &temp, context);

@@ -187,11 +762,11 @@
 
 HeapWord* G1Allocator::survivor_attempt_allocation(size_t min_word_size,
                                                    size_t desired_word_size,
                                                    size_t* actual_word_size,
                                                    AllocationContext_t context) {
-  assert(!_g1h->is_humongous(desired_word_size),
+  assert(!is_humongous(desired_word_size),
          "we should not be seeing humongous-size allocations in this path");
 
   HeapWord* result = survivor_gc_alloc_region(context)->attempt_allocation(min_word_size,
                                                                            desired_word_size,
                                                                            actual_word_size,

@@ -214,11 +789,11 @@
 
 HeapWord* G1Allocator::old_attempt_allocation(size_t min_word_size,
                                               size_t desired_word_size,
                                               size_t* actual_word_size,
                                               AllocationContext_t context) {
-  assert(!_g1h->is_humongous(desired_word_size),
+  assert(!is_humongous(desired_word_size),
          "we should not be seeing humongous-size allocations in this path");
 
   HeapWord* result = old_gc_alloc_region(context)->attempt_allocation(min_word_size,
                                                                       desired_word_size,
                                                                       actual_word_size,