
src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp

rev 7476 : imported patch expand_for_gc_cause
rev 7477 : imported patch move_stuff_up

*** 791,819 ****
      if (CMSDumpAtPromotionFailure) {
        cmsSpace()->dump_at_safepoint_with_locks(collector(), gclog_or_tty);
      }
  }
  
- CompactibleSpace*
- ConcurrentMarkSweepGeneration::first_compaction_space() const {
-   return _cmsSpace;
- }
- 
  void ConcurrentMarkSweepGeneration::reset_after_compaction() {
    // Clear the promotion information. These pointers can be adjusted
    // along with all the other pointers into the heap but
    // compaction is expected to be a rare event with
    // a heap using cms so don't do it without seeing the need.
    for (uint i = 0; i < ParallelGCThreads; i++) {
      _par_gc_thread_states[i]->promo.reset();
    }
  }
  
- void ConcurrentMarkSweepGeneration::space_iterate(SpaceClosure* blk, bool usedOnly) {
-   blk->do_space(_cmsSpace);
- }
- 
  void ConcurrentMarkSweepGeneration::compute_new_size() {
    assert_locked_or_safepoint(Heap_lock);
  
    // If incremental collection failed, we just want to expand
    // to the limit.
--- 791,810 ----
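
The two overrides removed above are not lost; the point of the move_stuff_up
patch is that CardGeneration can provide them once for all card-table
generations. A minimal sketch of the shared versions, assuming CardGeneration
reaches its single space through a space() accessor (that accessor name is an
assumption, not something shown on this page):

    // Hypothetical CardGeneration versions of the deleted CMS overrides.
    // space() is assumed to return the generation's only CompactibleSpace.
    CompactibleSpace* CardGeneration::first_compaction_space() const {
      return space();
    }

    void CardGeneration::space_iterate(SpaceClosure* blk, bool usedOnly) {
      blk->do_space(space());
    }
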
*** 880,890 ****
                contiguous_available()/1000);
      gclog_or_tty->print_cr(" Expand by "SIZE_FORMAT" (bytes)",
                expand_bytes);
    }
    // safe if expansion fails
!   expand(expand_bytes, 0, CMSExpansionCause::_satisfy_free_ratio);
    if (PrintGCDetails && Verbose) {
      gclog_or_tty->print_cr(" Expanded free fraction %f",
        ((double) free()) / capacity());
    }
  } else {
--- 871,881 ----
                contiguous_available()/1000);
      gclog_or_tty->print_cr(" Expand by "SIZE_FORMAT" (bytes)",
                expand_bytes);
    }
    // safe if expansion fails
!   expand_for_gc_cause(expand_bytes, 0, CMSExpansionCause::_satisfy_free_ratio);
    if (PrintGCDetails && Verbose) {
      gclog_or_tty->print_cr(" Expanded free fraction %f",
        ((double) free()) / capacity());
    }
  } else {
*** 1046,1057 ****
    oop res = _cmsSpace->promote(obj, obj_size);
    if (res == NULL) {
      // expand and retry
      size_t s = _cmsSpace->expansionSpaceRequired(obj_size);  // HeapWords
!     expand(s*HeapWordSize, MinHeapDeltaBytes,
!       CMSExpansionCause::_satisfy_promotion);
      // Since there's currently no next generation, we don't try to promote
      // into a more senior generation.
      assert(next_gen() == NULL, "assumption, based upon which no attempt "
                                 "is made to pass on a possibly failing "
                                 "promotion to next generation");
--- 1037,1047 ----
    oop res = _cmsSpace->promote(obj, obj_size);
    if (res == NULL) {
      // expand and retry
      size_t s = _cmsSpace->expansionSpaceRequired(obj_size);  // HeapWords
!     expand_for_gc_cause(s*HeapWordSize, MinHeapDeltaBytes, CMSExpansionCause::_satisfy_promotion);
      // Since there's currently no next generation, we don't try to promote
      // into a more senior generation.
      assert(next_gen() == NULL, "assumption, based upon which no attempt "
                                 "is made to pass on a possibly failing "
                                 "promotion to next generation");
*** 2623,2639 ****
  }
  
  ALL_SINCE_SAVE_MARKS_CLOSURES(CMS_SINCE_SAVE_MARKS_DEFN)
  
  void
- ConcurrentMarkSweepGeneration::younger_refs_iterate(OopsInGenClosure* cl) {
-   cl->set_generation(this);
-   younger_refs_in_space_iterate(_cmsSpace, cl);
-   cl->reset_generation();
- }
- 
- void
  ConcurrentMarkSweepGeneration::oop_iterate(ExtendedOopClosure* cl) {
    if (freelistLock()->owned_by_self()) {
      Generation::oop_iterate(cl);
    } else {
      MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
--- 2613,2622 ----
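
The younger_refs_iterate() override removed here follows the same pattern as
the earlier deletions. The body below is just the deleted code with the
CMS-specific _cmsSpace generalized to the assumed space() accessor, sketching
the form a shared CardGeneration version would take:

    // Sketch only: the deleted body, retargeted from _cmsSpace to space().
    void CardGeneration::younger_refs_iterate(OopsInGenClosure* cl) {
      cl->set_generation(this);
      younger_refs_in_space_iterate(space(), cl);
      cl->reset_generation();
    }
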
*** 2801,2826 ****
                                                    bool   tlab,
                                                    bool   parallel) {
    CMSSynchronousYieldRequest yr;
    assert(!tlab, "Can't deal with TLAB allocation");
    MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
!   expand(word_size*HeapWordSize, MinHeapDeltaBytes,
!     CMSExpansionCause::_satisfy_allocation);
    if (GCExpandToAllocateDelayMillis > 0) {
      os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
    }
    return have_lock_and_allocate(word_size, tlab);
  }
  
! // YSR: All of this generation expansion/shrinking stuff is an exact copy of
! // TenuredGeneration, which makes me wonder if we should move this
! // to CardGeneration and share it...
! bool ConcurrentMarkSweepGeneration::expand(size_t bytes, size_t expand_bytes) {
!   return CardGeneration::expand(bytes, expand_bytes);
! }
! 
! void ConcurrentMarkSweepGeneration::expand(size_t bytes, size_t expand_bytes,
    CMSExpansionCause::Cause cause) {
  
    bool success = expand(bytes, expand_bytes);
--- 2784,2803 ----
                                                    bool   tlab,
                                                    bool   parallel) {
    CMSSynchronousYieldRequest yr;
    assert(!tlab, "Can't deal with TLAB allocation");
    MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
!   expand_for_gc_cause(word_size*HeapWordSize, MinHeapDeltaBytes, CMSExpansionCause::_satisfy_allocation);
    if (GCExpandToAllocateDelayMillis > 0) {
      os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
    }
    return have_lock_and_allocate(word_size, tlab);
  }
  
! void ConcurrentMarkSweepGeneration::expand_for_gc_cause(
!     size_t bytes,
!     size_t expand_bytes,
    CMSExpansionCause::Cause cause) {
  
    bool success = expand(bytes, expand_bytes);
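
This hunk is the heart of the rename: the plain expand(size_t, size_t)
forwarder (and the YSR comment questioning it) is gone, since that method is
now inherited directly from CardGeneration, and the CMS-specific overload
that records a CMSExpansionCause becomes expand_for_gc_cause() so the two no
longer overload one another. The signature below is taken from the hunk; the
cause-recording tail of the body is paraphrased, not verbatim:

    void ConcurrentMarkSweepGeneration::expand_for_gc_cause(
        size_t bytes,
        size_t expand_bytes,
        CMSExpansionCause::Cause cause) {
      bool success = expand(bytes, expand_bytes);  // CardGeneration::expand
      // On success, remember why the generation grew; CMS consults this
      // cause later, e.g. when deciding whether to start a concurrent cycle.
      if (success) {
        set_expansion_cause(cause);
      }
    }
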
*** 2846,2857 ****
    // If there's not enough expansion space available, give up.
    if (_virtual_space.uncommitted_size() < (word_sz * HeapWordSize)) {
      return NULL;
    }
    // Otherwise, we try expansion.
!   expand(word_sz*HeapWordSize, MinHeapDeltaBytes,
!     CMSExpansionCause::_allocate_par_lab);
    // Now go around the loop and try alloc again;
    // A competing par_promote might beat us to the expansion space,
    // so we may go around the loop again if promotion fails again.
    if (GCExpandToAllocateDelayMillis > 0) {
      os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
--- 2823,2833 ----
    // If there's not enough expansion space available, give up.
    if (_virtual_space.uncommitted_size() < (word_sz * HeapWordSize)) {
      return NULL;
    }
    // Otherwise, we try expansion.
!   expand_for_gc_cause(word_sz*HeapWordSize, MinHeapDeltaBytes, CMSExpansionCause::_allocate_par_lab);
    // Now go around the loop and try alloc again;
    // A competing par_promote might beat us to the expansion space,
    // so we may go around the loop again if promotion fails again.
    if (GCExpandToAllocateDelayMillis > 0) {
      os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
*** 2874,2965 ****
    // If there's not enough expansion space available, give up.
    if (_virtual_space.uncommitted_size() < refill_size_bytes) {
      return false;
    }
    // Otherwise, we try expansion.
!   expand(refill_size_bytes, MinHeapDeltaBytes,
!     CMSExpansionCause::_allocate_par_spooling_space);
    // Now go around the loop and try alloc again;
    // A competing allocation might beat us to the expansion space,
    // so we may go around the loop again if allocation fails again.
    if (GCExpandToAllocateDelayMillis > 0) {
      os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
    }
  }
  }
- 
- void ConcurrentMarkSweepGeneration::shrink_by(size_t bytes) {
-   assert_locked_or_safepoint(ExpandHeap_lock);
-   // Shrink committed space
-   _virtual_space.shrink_by(bytes);
-   // Shrink space; this also shrinks the space's BOT
-   _cmsSpace->set_end((HeapWord*) _virtual_space.high());
-   size_t new_word_size = heap_word_size(_cmsSpace->capacity());
-   // Shrink the shared block offset array
-   _bts->resize(new_word_size);
-   MemRegion mr(_cmsSpace->bottom(), new_word_size);
-   // Shrink the card table
-   Universe::heap()->barrier_set()->resize_covered_region(mr);
- 
-   if (Verbose && PrintGC) {
-     size_t new_mem_size = _virtual_space.committed_size();
-     size_t old_mem_size = new_mem_size + bytes;
-     gclog_or_tty->print_cr("Shrinking %s from " SIZE_FORMAT "K to " SIZE_FORMAT "K",
-                            name(), old_mem_size/K, new_mem_size/K);
-   }
- }
- 
  void ConcurrentMarkSweepGeneration::shrink(size_t bytes) {
-   assert_locked_or_safepoint(Heap_lock);
-   size_t size = ReservedSpace::page_align_size_down(bytes);
    // Only shrink if a compaction was done so that all the free space
    // in the generation is in a contiguous block at the end.
!   if (size > 0 && did_compact()) {
!     shrink_by(size);
!   }
! }
! 
! bool ConcurrentMarkSweepGeneration::grow_by(size_t bytes) {
!   assert_locked_or_safepoint(Heap_lock);
!   bool result = _virtual_space.expand_by(bytes);
!   if (result) {
!     size_t new_word_size =
!       heap_word_size(_virtual_space.committed_size());
!     MemRegion mr(_cmsSpace->bottom(), new_word_size);
!     _bts->resize(new_word_size);  // resize the block offset shared array
!     Universe::heap()->barrier_set()->resize_covered_region(mr);
!     // Hmmmm... why doesn't CFLS::set_end verify locking?
!     // This is quite ugly; FIX ME XXX
!     _cmsSpace->assert_locked(freelistLock());
!     _cmsSpace->set_end((HeapWord*)_virtual_space.high());
! 
!     // update the space and generation capacity counters
!     if (UsePerfData) {
!       _space_counters->update_capacity();
!       _gen_counters->update_all();
!     }
! 
!     if (Verbose && PrintGC) {
!       size_t new_mem_size = _virtual_space.committed_size();
!       size_t old_mem_size = new_mem_size - bytes;
!       gclog_or_tty->print_cr("Expanding %s from " SIZE_FORMAT "K by " SIZE_FORMAT "K to " SIZE_FORMAT "K",
!                              name(), old_mem_size/K, bytes/K, new_mem_size/K);
!     }
    }
-   return result;
  }
  
! bool ConcurrentMarkSweepGeneration::grow_to_reserved() {
    assert_locked_or_safepoint(Heap_lock);
-   bool success = true;
-   const size_t remaining_bytes = _virtual_space.uncommitted_size();
-   if (remaining_bytes > 0) {
-     success = grow_by(remaining_bytes);
-     DEBUG_ONLY(if (!success) warning("grow to reserved failed");)
-   }
-   return success;
  }
  
  void ConcurrentMarkSweepGeneration::shrink_free_list_by(size_t bytes) {
    assert_locked_or_safepoint(Heap_lock);
    assert_lock_strong(freelistLock());
--- 2850,2879 ----
    // If there's not enough expansion space available, give up.
    if (_virtual_space.uncommitted_size() < refill_size_bytes) {
      return false;
    }
    // Otherwise, we try expansion.
!   expand_for_gc_cause(refill_size_bytes, MinHeapDeltaBytes, CMSExpansionCause::_allocate_par_spooling_space);
    // Now go around the loop and try alloc again;
    // A competing allocation might beat us to the expansion space,
    // so we may go around the loop again if allocation fails again.
    if (GCExpandToAllocateDelayMillis > 0) {
      os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
    }
  }
  }
  
  void ConcurrentMarkSweepGeneration::shrink(size_t bytes) {
    // Only shrink if a compaction was done so that all the free space
    // in the generation is in a contiguous block at the end.
!   if (did_compact()) {
!     CardGeneration::shrink(bytes);
    }
  }
  
! void ConcurrentMarkSweepGeneration::assert_correct_size_change_locking() {
    assert_locked_or_safepoint(Heap_lock);
  }
  
  void ConcurrentMarkSweepGeneration::shrink_free_list_by(size_t bytes) {
    assert_locked_or_safepoint(Heap_lock);
    assert_lock_strong(freelistLock());
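
The tail of this hunk completes the consolidation: shrink_by(), grow_by() and
grow_to_reserved() are deleted from the CMS file, shrink() now delegates to
CardGeneration::shrink() (guarded by did_compact()), and the one CMS-specific
bit that remains is the new assert_correct_size_change_locking() hook, through
which the shared CardGeneration resize paths can assert the subclass's locking
protocol. A compilable toy illustration of that hook pattern; the names mirror
the patch, but the stubbed logic is illustrative, not HotSpot's:

    #include <cassert>
    #include <cstddef>

    struct Generation {
      virtual ~Generation() {}
    };

    struct CardGeneration : Generation {
      // Subclasses state which lock must be held while the generation
      // changes size; the shared grow/shrink paths assert through this.
      virtual void assert_correct_size_change_locking() = 0;

      virtual void shrink(size_t /*bytes*/) {
        assert_correct_size_change_locking();
        // ... shared logic: shrink committed space, BOT and card table ...
      }
    };

    struct ConcurrentMarkSweepGeneration : CardGeneration {
      bool heap_locked_or_at_safepoint = true;  // stand-in for Heap_lock state
      bool compacted = false;                   // stand-in for did_compact()

      void assert_correct_size_change_locking() override {
        // Plays the role of assert_locked_or_safepoint(Heap_lock).
        assert(heap_locked_or_at_safepoint);
      }

      // Only shrink after a compaction, as in the hunk above.
      void shrink(size_t bytes) override {
        if (compacted) {
          CardGeneration::shrink(bytes);
        }
      }
    };
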