
src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp

rev 7903 : imported patch 8073013-add-detailed-information-about-plab-memory-usage
rev 7905 : imported patch 8067336-allow-that-plab-allocations-at-the-end-of-regions-are-flexible
rev 7908 : [mq]: 8073317-move-region-level-allocation-into-allocregionmanager

*** 24,77 ****
  #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP
  #define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP
  
  #include "gc_implementation/g1/concurrentMark.hpp"
- #include "gc_implementation/g1/g1CollectedHeap.hpp"
  #include "gc_implementation/g1/g1AllocRegion.inline.hpp"
  #include "gc_implementation/g1/g1CollectorPolicy.hpp"
  #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
  #include "gc_implementation/g1/heapRegionManager.inline.hpp"
  #include "gc_implementation/g1/heapRegionSet.inline.hpp"
  #include "runtime/orderAccess.inline.hpp"
  #include "utilities/taskqueue.hpp"
  
- PLABStats* G1CollectedHeap::alloc_buffer_stats(InCSetState dest) {
-   switch (dest.value()) {
-     case InCSetState::Young:
-       return &_survivor_plab_stats;
-     case InCSetState::Old:
-       return &_old_plab_stats;
-     default:
-       ShouldNotReachHere();
-       return NULL; // Keep some compilers happy
-   }
- }
- 
- size_t G1CollectedHeap::desired_plab_sz(InCSetState dest) {
-   size_t gclab_word_size = alloc_buffer_stats(dest)->desired_plab_sz();
-   // Prevent humongous PLAB sizes for two reasons:
-   // * PLABs are allocated using similar paths as oops, but should
-   //   never be in a humongous region
-   // * Allowing humongous PLABs needlessly churns the region free lists
-   return MIN2(_humongous_object_threshold_in_words, gclab_word_size);
- }
- 
- HeapWord* G1CollectedHeap::par_allocate_during_gc(InCSetState dest,
-                                                   size_t word_size,
-                                                   AllocationContext_t context) {
-   switch (dest.value()) {
-     case InCSetState::Young:
-       return survivor_attempt_allocation(word_size, context);
-     case InCSetState::Old:
-       return old_attempt_allocation(word_size, context);
-     default:
-       ShouldNotReachHere();
-       return NULL; // Keep some compilers happy
-   }
- }
- 
  // Inline functions for G1CollectedHeap
  
  inline AllocationContextStats& G1CollectedHeap::allocation_context_stats() {
    return _allocation_context_stats;
  }
--- 24,44 ----
  #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP
  #define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP
  
  #include "gc_implementation/g1/concurrentMark.hpp"
  #include "gc_implementation/g1/g1AllocRegion.inline.hpp"
+ #include "gc_implementation/g1/g1CollectedHeap.hpp"
  #include "gc_implementation/g1/g1CollectorPolicy.hpp"
+ #include "gc_implementation/g1/g1InCSetState.hpp"
  #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
+ #include "gc_implementation/g1/heapRegion.inline.hpp"
  #include "gc_implementation/g1/heapRegionManager.inline.hpp"
  #include "gc_implementation/g1/heapRegionSet.inline.hpp"
  #include "runtime/orderAccess.inline.hpp"
  #include "utilities/taskqueue.hpp"
  
  // Inline functions for G1CollectedHeap
  
  inline AllocationContextStats& G1CollectedHeap::allocation_context_stats() {
    return _allocation_context_stats;
  }
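
Reviewer note: per rev 7908 ("move region-level allocation into allocregionmanager"), the helpers deleted above presumably reappear on the allocator side rather than vanishing. A minimal sketch of how the InCSetState dispatch could look in its new home follows; the receiving class name (G1Allocator) and exact method placement are assumptions, not part of this webrev. The body mirrors the code removed from G1CollectedHeap above.

  // Sketch only -- class name and placement assumed; body taken from the
  // removed G1CollectedHeap::par_allocate_during_gc above.
  HeapWord* G1Allocator::par_allocate_during_gc(InCSetState dest,
                                                size_t word_size,
                                                AllocationContext_t context) {
    switch (dest.value()) {
      case InCSetState::Young:
        return survivor_attempt_allocation(word_size, context); // copy into survivor regions
      case InCSetState::Old:
        return old_attempt_allocation(word_size, context);      // copy into old regions
      default:
        ShouldNotReachHere();
        return NULL; // Keep some compilers happy
    }
  }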
*** 136,147 ****
    assert_heap_not_locked_and_not_at_safepoint();
    assert(!is_humongous(word_size), "attempt_allocation() should not "
           "be called for humongous allocation requests");
  
    AllocationContext_t context = AllocationContext::current();
!   HeapWord* result = _allocator->mutator_alloc_region(context)->attempt_allocation(word_size,
!                                                                                    false /* bot_updates */);
    if (result == NULL) {
      result = attempt_allocation_slow(word_size,
                                       context,
                                       gc_count_before_ret,
                                       gclocker_retry_count_ret);
--- 103,113 ----
    assert_heap_not_locked_and_not_at_safepoint();
    assert(!is_humongous(word_size), "attempt_allocation() should not "
           "be called for humongous allocation requests");
  
    AllocationContext_t context = AllocationContext::current();
!   HeapWord* result = _allocator->par_allocate_during_mutator(word_size, false /* bot_updates */, context);
    if (result == NULL) {
      result = attempt_allocation_slow(word_size,
                                       context,
                                       gc_count_before_ret,
                                       gclocker_retry_count_ret);
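
Reviewer note: the right-hand side funnels mutator allocation through a single allocator entry point instead of reaching into the alloc region directly. Based solely on the call it replaces on the left, a minimal sketch of what par_allocate_during_mutator presumably does; the implementation lives in allocator sources not shown in this file, so this is an assumption:

  // Sketch only -- assumes the new entry point simply wraps the former
  // direct region access.
  HeapWord* G1Allocator::par_allocate_during_mutator(size_t word_size,
                                                     bool bot_updates,
                                                     AllocationContext_t context) {
    // Lock-free allocation out of the current mutator alloc region.
    return mutator_alloc_region(context)->attempt_allocation(word_size, bot_updates);
  }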
*** 151,193 ****
      dirty_young_block(result, word_size);
    }
    return result;
  }
  
- inline HeapWord* G1CollectedHeap::survivor_attempt_allocation(size_t word_size,
-                                                               AllocationContext_t context) {
-   assert(!is_humongous(word_size),
-          "we should not be seeing humongous-size allocations in this path");
- 
-   HeapWord* result = _allocator->survivor_gc_alloc_region(context)->attempt_allocation(word_size,
-                                                                                        false /* bot_updates */);
-   if (result == NULL) {
-     MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
-     result = _allocator->survivor_gc_alloc_region(context)->attempt_allocation_locked(word_size,
-                                                                                       false /* bot_updates */);
-   }
-   if (result != NULL) {
-     dirty_young_block(result, word_size);
-   }
-   return result;
- }
- 
- inline HeapWord* G1CollectedHeap::old_attempt_allocation(size_t word_size,
-                                                          AllocationContext_t context) {
-   assert(!is_humongous(word_size),
-          "we should not be seeing humongous-size allocations in this path");
- 
-   HeapWord* result = _allocator->old_gc_alloc_region(context)->attempt_allocation(word_size,
-                                                                                   true /* bot_updates */);
-   if (result == NULL) {
-     MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
-     result = _allocator->old_gc_alloc_region(context)->attempt_allocation_locked(word_size,
-                                                                                  true /* bot_updates */);
-   }
-   return result;
- }
- 
  // It dirties the cards that cover the block so that the post
  // write barrier never queues anything when updating objects on this
  // block. It is assumed (and in fact we assert) that the block
  // belongs to a young region.
  inline void
--- 117,126 ----
      dirty_young_block(result, word_size);
    }
    return result;
  }
  
  // It dirties the cards that cover the block so that the post
  // write barrier never queues anything when updating objects on this
  // block. It is assumed (and in fact we assert) that the block
  // belongs to a young region.
  inline void
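
Reviewer note: the two deleted functions share a pattern worth keeping in mind when reviewing the allocator side of this patch: a lock-free attempt on the active GC alloc region, then a retry under FreeList_lock, which may retire the current region and install a fresh one. A sketch of the survivor variant as it would presumably migrate; placement in G1Allocator and the _g1h back-pointer are assumed:

  // Sketch only -- names and placement assumed; logic mirrors the removed
  // G1CollectedHeap::survivor_attempt_allocation above.
  HeapWord* G1Allocator::survivor_attempt_allocation(size_t word_size,
                                                     AllocationContext_t context) {
    assert(!_g1h->is_humongous(word_size),
           "we should not be seeing humongous-size allocations in this path");
    // Fast path: lock-free allocation in the current survivor region.
    HeapWord* result = survivor_gc_alloc_region(context)->attempt_allocation(word_size,
                                                                             false /* bot_updates */);
    if (result == NULL) {
      // Slow path: under FreeList_lock the current region may be retired
      // and a new one taken from the free list before retrying.
      MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
      result = survivor_gc_alloc_region(context)->attempt_allocation_locked(word_size,
                                                                            false /* bot_updates */);
    }
    if (result != NULL) {
      // Survivor regions are young: dirty the covering cards so the post
      // write barrier never queues anything for this block.
      _g1h->dirty_young_block(result, word_size);
    }
    return result;
  }

The old-generation variant differs only in passing true for bot_updates (old regions maintain the block offset table) and in not dirtying the block, since that short-circuit applies only to young regions.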