
src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp

rev 7476 : imported patch expand_for_gc_cause

*** 880,890 ****
          contiguous_available()/1000);
        gclog_or_tty->print_cr(" Expand by "SIZE_FORMAT" (bytes)",
          expand_bytes);
      }
      // safe if expansion fails
!     expand(expand_bytes, 0, CMSExpansionCause::_satisfy_free_ratio);
      if (PrintGCDetails && Verbose) {
        gclog_or_tty->print_cr(" Expanded free fraction %f",
          ((double) free()) / capacity());
      }
    } else {
--- 880,890 ----
          contiguous_available()/1000);
        gclog_or_tty->print_cr(" Expand by "SIZE_FORMAT" (bytes)",
          expand_bytes);
      }
      // safe if expansion fails
!     expand_for_gc_cause(expand_bytes, 0, CMSExpansionCause::_satisfy_free_ratio);
      if (PrintGCDetails && Verbose) {
        gclog_or_tty->print_cr(" Expanded free fraction %f",
          ((double) free()) / capacity());
      }
    } else {
*** 1046,1057 ****
    oop res = _cmsSpace->promote(obj, obj_size);
    if (res == NULL) {
      // expand and retry
      size_t s = _cmsSpace->expansionSpaceRequired(obj_size);  // HeapWords
!     expand(s*HeapWordSize, MinHeapDeltaBytes,
!       CMSExpansionCause::_satisfy_promotion);
      // Since there's currently no next generation, we don't try to promote
      // into a more senior generation.
      assert(next_gen() == NULL, "assumption, based upon which no attempt "
                                 "is made to pass on a possibly failing "
                                 "promotion to next generation");
--- 1046,1056 ----
    oop res = _cmsSpace->promote(obj, obj_size);
    if (res == NULL) {
      // expand and retry
      size_t s = _cmsSpace->expansionSpaceRequired(obj_size);  // HeapWords
!     expand_for_gc_cause(s*HeapWordSize, MinHeapDeltaBytes, CMSExpansionCause::_satisfy_promotion);
      // Since there's currently no next generation, we don't try to promote
      // into a more senior generation.
      assert(next_gen() == NULL, "assumption, based upon which no attempt "
                                 "is made to pass on a possibly failing "
                                 "promotion to next generation");
*** 2801,2812 ****
                                                    bool   tlab,
                                                    bool   parallel) {
    CMSSynchronousYieldRequest yr;
    assert(!tlab, "Can't deal with TLAB allocation");
    MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
!   expand(word_size*HeapWordSize, MinHeapDeltaBytes,
!     CMSExpansionCause::_satisfy_allocation);
    if (GCExpandToAllocateDelayMillis > 0) {
      os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
    }
    return have_lock_and_allocate(word_size, tlab);
  }
--- 2800,2810 ----
                                                    bool   tlab,
                                                    bool   parallel) {
    CMSSynchronousYieldRequest yr;
    assert(!tlab, "Can't deal with TLAB allocation");
    MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
!   expand_for_gc_cause(word_size*HeapWordSize, MinHeapDeltaBytes, CMSExpansionCause::_satisfy_allocation);
    if (GCExpandToAllocateDelayMillis > 0) {
      os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
    }
    return have_lock_and_allocate(word_size, tlab);
  }
*** 2816,2826 ****
  // to CardGeneration and share it...
  bool ConcurrentMarkSweepGeneration::expand(size_t bytes, size_t expand_bytes) {
    return CardGeneration::expand(bytes, expand_bytes);
  }

! void ConcurrentMarkSweepGeneration::expand(size_t bytes, size_t expand_bytes,
      CMSExpansionCause::Cause cause)
  {
    bool success = expand(bytes, expand_bytes);
--- 2814,2826 ----
  // to CardGeneration and share it...
  bool ConcurrentMarkSweepGeneration::expand(size_t bytes, size_t expand_bytes) {
    return CardGeneration::expand(bytes, expand_bytes);
  }

! void ConcurrentMarkSweepGeneration::expand_for_gc_cause(
!     size_t bytes,
!     size_t expand_bytes,
      CMSExpansionCause::Cause cause)
  {
    bool success = expand(bytes, expand_bytes);
*** 2846,2857 ****
      // If there's not enough expansion space available, give up.
      if (_virtual_space.uncommitted_size() < (word_sz * HeapWordSize)) {
        return NULL;
      }
      // Otherwise, we try expansion.
!     expand(word_sz*HeapWordSize, MinHeapDeltaBytes,
!       CMSExpansionCause::_allocate_par_lab);
      // Now go around the loop and try alloc again;
      // A competing par_promote might beat us to the expansion space,
      // so we may go around the loop again if promotion fails again.
      if (GCExpandToAllocateDelayMillis > 0) {
        os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
--- 2846,2856 ----
      // If there's not enough expansion space available, give up.
      if (_virtual_space.uncommitted_size() < (word_sz * HeapWordSize)) {
        return NULL;
      }
      // Otherwise, we try expansion.
!     expand_for_gc_cause(word_sz*HeapWordSize, MinHeapDeltaBytes, CMSExpansionCause::_allocate_par_lab);
      // Now go around the loop and try alloc again;
      // A competing par_promote might beat us to the expansion space,
      // so we may go around the loop again if promotion fails again.
      if (GCExpandToAllocateDelayMillis > 0) {
        os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
*** 2874,2885 ****
      // If there's not enough expansion space available, give up.
      if (_virtual_space.uncommitted_size() < refill_size_bytes) {
        return false;
      }
      // Otherwise, we try expansion.
!     expand(refill_size_bytes, MinHeapDeltaBytes,
!       CMSExpansionCause::_allocate_par_spooling_space);
      // Now go around the loop and try alloc again;
      // A competing allocation might beat us to the expansion space,
      // so we may go around the loop again if allocation fails again.
      if (GCExpandToAllocateDelayMillis > 0) {
        os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
--- 2873,2883 ----
      // If there's not enough expansion space available, give up.
      if (_virtual_space.uncommitted_size() < refill_size_bytes) {
        return false;
      }
      // Otherwise, we try expansion.
!     expand_for_gc_cause(refill_size_bytes, MinHeapDeltaBytes, CMSExpansionCause::_allocate_par_spooling_space);
      // Now go around the loop and try alloc again;
      // A competing allocation might beat us to the expansion space,
      // so we may go around the loop again if allocation fails again.
      if (GCExpandToAllocateDelayMillis > 0) {
        os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
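
Taken together, the hunks rename the three-argument expand() overload to expand_for_gc_cause() and switch every GC-driven call site (_satisfy_free_ratio, _satisfy_promotion, _satisfy_allocation, _allocate_par_lab, _allocate_par_spooling_space) to the new name, while the two-argument expand() remains a plain forward to CardGeneration::expand(). For orientation, here is a rough sketch of how the renamed wrapper likely reads after the patch; only the signature and the first statement are visible in this webrev, so the cause bookkeeping (set_expansion_cause) and the PrintGCDetails logging below are assumptions, not part of the shown diff:

    // Sketch only: signature and first statement come from the 2814,2826 hunk;
    // everything after it is assumed and not shown in this webrev.
    void ConcurrentMarkSweepGeneration::expand_for_gc_cause(
        size_t bytes,
        size_t expand_bytes,
        CMSExpansionCause::Cause cause)
    {
      bool success = expand(bytes, expand_bytes);

      // Assumed remainder: record why the generation grew so that later
      // policy decisions can take the GC-driven expansion into account.
      if (success) {
        set_expansion_cause(cause);   // assumed helper, not visible in this diff
        if (PrintGCDetails && Verbose) {
          gclog_or_tty->print_cr("Expanded CMS gen for %s",
            CMSExpansionCause::to_string(cause));   // assumed logging
        }
      }
    }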