
src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp

*** 265,274 **** --- 265,277 ----
    // promoting generation, we'll instead just use the minimum
    // object size (which today is a header's worth of space);
    // note that all arithmetic is in units of HeapWords.
    assert(MinChunkSize >= CollectedHeap::min_fill_size(), "just checking");
    assert(_dilatation_factor >= 1.0, "from previous assert");
+ 
+   // Support for CMSFastPromotionFailure
+   reset_promotion_failed();
  }
  
  // The field "_initiating_occupancy" represents the occupancy percentage
  // at which we trigger a new collection cycle. Unless explicitly specified
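The accessors used throughout this change (reset_promotion_failed(), set_promotion_failed(), has_promotion_failed()) are declared outside this file and are not shown in the webrev. A minimal sketch of what they might look like in concurrentMarkSweepGeneration.hpp, assuming a simple per-generation flag, follows; the field name and exact form are hypothetical:

    // Sketch only, not part of this webrev: state backing the
    // CMSFastPromotionFailure fast path. The real declarations may differ.
    volatile bool _promotion_failed;

    bool has_promotion_failed() const { return _promotion_failed; }
    void set_promotion_failed()       { _promotion_failed = true;  }
    void reset_promotion_failed()     { _promotion_failed = false; }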
*** 873,882 **** --- 876,888 ----
  // (cms old generation).
  void ConcurrentMarkSweepGeneration::promotion_failure_occurred() {
    if (CMSDumpAtPromotionFailure) {
      cmsSpace()->dump_at_safepoint_with_locks(collector(), gclog_or_tty);
    }
+   if (CMSFastPromotionFailure) {
+     reset_promotion_failed();
+   }
  }
  
  CompactibleSpace*
  ConcurrentMarkSweepGeneration::first_compaction_space() const {
    return _cmsSpace;
*** 1347,1356 **** --- 1353,1365 ----
  #ifndef PRODUCT
    if (Universe::heap()->promotion_should_fail()) {
      return NULL;
    }
  #endif  // #ifndef PRODUCT
+   if (CMSFastPromotionFailure && has_promotion_failed()) {
+     return NULL;
+   }
  
    CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
    PromotionInfo* promoInfo = &ps->promo;
    // if we are tracking promotions, then first ensure space for
    // promotion (including spooling space for saving header if necessary).
*** 1367,1376 **** --- 1376,1388 ----
    }
    assert(promoInfo->has_spooling_space(), "Control point invariant");
    const size_t alloc_sz = CompactibleFreeListSpace::adjustObjectSize(word_sz);
    HeapWord* obj_ptr = ps->lab.alloc(alloc_sz);
    if (obj_ptr == NULL) {
+     if (CMSFastPromotionFailure && has_promotion_failed()) {
+       return NULL;
+     }
      obj_ptr = expand_and_par_lab_allocate(ps, alloc_sz);
      if (obj_ptr == NULL) {
        return NULL;
      }
    }
*** 3347,3362 **** --- 3359,3382 ----
  }
  
  HeapWord* ConcurrentMarkSweepGeneration::expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz) {
    HeapWord* res = NULL;
    MutexLocker x(ParGCRareEvent_lock);
+ 
+   if (CMSFastPromotionFailure && has_promotion_failed()) {
+     // Caller must have checked already without synchronization.
+     // Check again here while holding the lock.
+     return NULL;
+   }
+ 
    while (true) {
      // Expansion by some other thread might make alloc OK now:
      res = ps->lab.alloc(word_sz);
      if (res != NULL) return res;
      // If there's not enough expansion space available, give up.
      if (_virtual_space.uncommitted_size() < (word_sz * HeapWordSize)) {
+       set_promotion_failed();
        return NULL;
      }
      // Otherwise, we try expansion.
      expand(word_sz*HeapWordSize, MinHeapDeltaBytes,
        CMSExpansionCause::_allocate_par_lab);
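Note the pattern in this hunk and the next: callers first read the flag without synchronization on the hot path, then re-check it under ParGCRareEvent_lock before doing the expensive expansion work. A self-contained sketch of that check / lock / re-check idiom, with hypothetical names standing in for the HotSpot ones (std::atomic and std::mutex in place of the VM's volatile flag and MutexLocker):

    #include <atomic>
    #include <cstddef>
    #include <mutex>

    // Hypothetical stand-ins: rare_event_lock plays the role of
    // ParGCRareEvent_lock, promotion_failed the role of the generation's
    // promotion-failed flag.
    static std::mutex rare_event_lock;
    static std::atomic<bool> promotion_failed{false};

    static void* try_expand_and_allocate(std::size_t size) {
      // Placeholder for the expensive expand-and-allocate attempt;
      // records the failure so later callers can bail out early.
      (void)size;
      promotion_failed.store(true, std::memory_order_relaxed);
      return nullptr;
    }

    void* allocate_or_fail(std::size_t size) {
      // Fast path: racy read lets threads give up without taking the lock.
      if (promotion_failed.load(std::memory_order_relaxed)) return nullptr;
      std::lock_guard<std::mutex> guard(rare_event_lock);
      // Re-check under the lock: another thread may have failed meanwhile.
      if (promotion_failed.load(std::memory_order_relaxed)) return nullptr;
      return try_expand_and_allocate(size);
    }

The racy first read is safe because both stale outcomes are benign: a stale false costs at most one extra trip through the lock, and the re-check under the lock makes the final answer authoritative.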
*** 3371,3390 **** --- 3391,3418 ----
  bool ConcurrentMarkSweepGeneration::expand_and_ensure_spooling_space(
    PromotionInfo* promo) {
    MutexLocker x(ParGCRareEvent_lock);
+ 
+   if (CMSFastPromotionFailure && has_promotion_failed()) {
+     // Caller must have checked already without synchronization.
+     // Check again here while holding the lock.
+     return false;
+   }
+ 
    size_t refill_size_bytes = promo->refillSize() * HeapWordSize;
    while (true) {
      // Expansion by some other thread might make alloc OK now:
      if (promo->ensure_spooling_space()) {
        assert(promo->has_spooling_space(),
               "Post-condition of successful ensure_spooling_space()");
        return true;
      }
      // If there's not enough expansion space available, give up.
      if (_virtual_space.uncommitted_size() < refill_size_bytes) {
+       set_promotion_failed();
        return false;
      }
      // Otherwise, we try expansion.
      expand(refill_size_bytes, MinHeapDeltaBytes,
        CMSExpansionCause::_allocate_par_spooling_space);