src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp

@@ -201,11 +201,12 @@
      CardTableRS* ct, bool use_adaptive_freelists,
      FreeBlockDictionary<FreeChunk>::DictionaryChoice dictionaryChoice) :
   CardGeneration(rs, initial_byte_size, level, ct),
   _dilatation_factor(((double)MinChunkSize)/((double)(CollectedHeap::min_fill_size()))),
   _debug_collection_type(Concurrent_collection_type),
-  _did_compact(false)
+  _did_compact(false),
+  _has_promotion_failed(false)
 {
   HeapWord* bottom = (HeapWord*) _virtual_space.low();
   HeapWord* end    = (HeapWord*) _virtual_space.high();
 
   _direct_allocated_words = 0;

@@ -873,10 +874,13 @@
 // (cms old generation).
 void ConcurrentMarkSweepGeneration::promotion_failure_occurred() {
   if (CMSDumpAtPromotionFailure) {
     cmsSpace()->dump_at_safepoint_with_locks(collector(), gclog_or_tty);
   }
+  if (CMSFastPromotionFailure) {
+    reset_promotion_failed();
+  }
 }
 
 CompactibleSpace*
 ConcurrentMarkSweepGeneration::first_compaction_space() const {
   return _cmsSpace;

@@ -1347,10 +1351,13 @@
 #ifndef PRODUCT
   if (Universe::heap()->promotion_should_fail()) {
     return NULL;
   }
 #endif  // #ifndef PRODUCT
+  if (CMSFastPromotionFailure && has_promotion_failed()) {
+    return NULL;
+  }
 
   CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
   PromotionInfo* promoInfo = &ps->promo;
   // if we are tracking promotions, then first ensure space for
   // promotion (including spooling space for saving header if necessary).

@@ -3345,18 +3352,30 @@
     }
   }
 }
 
 HeapWord* ConcurrentMarkSweepGeneration::expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz) {
+  // Fast path: if a promotion failure has already been recorded, fail without taking the lock.
+  if (CMSFastPromotionFailure && has_promotion_failed()) {
+    return NULL;
+  }
+
   HeapWord* res = NULL;
   MutexLocker x(ParGCRareEvent_lock);
+
+  // Re-check under the lock: another thread may have recorded a failure while we waited for it.
+  if (CMSFastPromotionFailure && has_promotion_failed()) {
+    return NULL;
+  }
+
   while (true) {
     // Expansion by some other thread might make alloc OK now:
     res = ps->lab.alloc(word_sz);
     if (res != NULL) return res;
     // If there's not enough expansion space available, give up.
     if (_virtual_space.uncommitted_size() < (word_sz * HeapWordSize)) {
+      set_promotion_failed();
       return NULL;
     }
     // Otherwise, we try expansion.
     expand(word_sz*HeapWordSize, MinHeapDeltaBytes,
       CMSExpansionCause::_allocate_par_lab);

@@ -3371,20 +3390,27 @@
 
 
 bool ConcurrentMarkSweepGeneration::expand_and_ensure_spooling_space(
   PromotionInfo* promo) {
   MutexLocker x(ParGCRareEvent_lock);
+
+  // The fast unlocked check happens in the caller; re-check now that we hold the lock.
+  if (CMSFastPromotionFailure && has_promotion_failed()) {
+    return false;
+  }
+
   size_t refill_size_bytes = promo->refillSize() * HeapWordSize;
   while (true) {
     // Expansion by some other thread might make alloc OK now:
     if (promo->ensure_spooling_space()) {
       assert(promo->has_spooling_space(),
              "Post-condition of successful ensure_spooling_space()");
       return true;
     }
     // If there's not enough expansion space available, give up.
     if (_virtual_space.uncommitted_size() < refill_size_bytes) {
+      set_promotion_failed();
       return false;
     }
     // Otherwise, we try expansion.
     expand(refill_size_bytes, MinHeapDeltaBytes,
       CMSExpansionCause::_allocate_par_spooling_space);
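
Not shown in this webrev: the definition of the CMSFastPromotionFailure flag
itself, which would live with the other CMS flags in globals.hpp. An assumed
sketch of the definition (wording and default are guesses, not taken from this
patch):

    product(bool, CMSFastPromotionFailure, true,                              \
            "Fast return in case of promotion failure")                       \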