--- old/src/share/vm/gc/g1/g1Allocator.cpp	2015-08-06 16:11:22.620407672 +0200
+++ new/src/share/vm/gc/g1/g1Allocator.cpp	2015-08-06 16:11:22.543405376 +0200
@@ -79,6 +79,8 @@
 void G1DefaultAllocator::init_gc_alloc_regions(EvacuationInfo& evacuation_info) {
   assert_at_safepoint(true /* should_be_vm_thread */);

+  G1Allocator::init_gc_alloc_regions(evacuation_info);
+
   _survivor_gc_alloc_region.init();
   _old_gc_alloc_region.init();
   reuse_retained_old_region(evacuation_info,
@@ -147,6 +149,22 @@
   }
 }

+bool G1Allocator::survivor_is_full(AllocationContext_t context) const {
+  return _survivor_is_full;
+}
+
+bool G1Allocator::old_is_full(AllocationContext_t context) const {
+  return _old_is_full;
+}
+
+void G1Allocator::set_survivor_full(AllocationContext_t context) {
+  _survivor_is_full = true;
+}
+
+void G1Allocator::set_old_full(AllocationContext_t context) {
+  _old_is_full = true;
+}
+
 HeapWord* G1Allocator::survivor_attempt_allocation(size_t word_size,
                                                    AllocationContext_t context) {
   assert(!_g1h->is_humongous(word_size),
@@ -154,10 +172,13 @@

   HeapWord* result = survivor_gc_alloc_region(context)->attempt_allocation(word_size,
                                                                            false /* bot_updates */);
-  if (result == NULL) {
+  if (result == NULL && !survivor_is_full(context)) {
     MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
     result = survivor_gc_alloc_region(context)->attempt_allocation_locked(word_size,
                                                                           false /* bot_updates */);
+    if (result == NULL) {
+      set_survivor_full(context);
+    }
   }
   if (result != NULL) {
     _g1h->dirty_young_block(result, word_size);
@@ -172,42 +193,51 @@

   HeapWord* result = old_gc_alloc_region(context)->attempt_allocation(word_size,
                                                                       true /* bot_updates */);
-  if (result == NULL) {
+  if (result == NULL && !old_is_full(context)) {
     MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
     result = old_gc_alloc_region(context)->attempt_allocation_locked(word_size,
                                                                      true /* bot_updates */);
+    if (result == NULL) {
+      set_old_full(context);
+    }
   }
   return result;
 }

+void G1Allocator::init_gc_alloc_regions(EvacuationInfo& evacuation_info) {
+  _survivor_is_full = false;
+  _old_is_full = false;
+}
+
 G1PLABAllocator::G1PLABAllocator(G1Allocator* allocator) :
   _g1h(G1CollectedHeap::heap()),
   _allocator(allocator),
   _survivor_alignment_bytes(calc_survivor_alignment_bytes()) {
 }

-HeapWord* G1PLABAllocator::allocate_direct_or_new_plab(InCSetState dest,
+HeapWord* G1PLABAllocator::allocate_inline_or_new_plab(InCSetState dest,
                                                        size_t word_sz,
-                                                       AllocationContext_t context) {
+                                                       AllocationContext_t context,
+                                                       bool* plab_refill_failed) {
   size_t gclab_word_size = _g1h->desired_plab_sz(dest);
   if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) {
     G1PLAB* alloc_buf = alloc_buffer(dest, context);
     alloc_buf->retire();

     HeapWord* buf = _allocator->par_allocate_during_gc(dest, gclab_word_size, context);
-    if (buf == NULL) {
-      return NULL; // Let caller handle allocation failure.
+    if (buf != NULL) {
+      alloc_buf->set_word_size(gclab_word_size);
+      alloc_buf->set_buf(buf);
+
+      HeapWord* const obj = alloc_buf->allocate(word_sz);
+      assert(obj != NULL, "buffer was definitely big enough...");
+      return obj;
     }
     // Otherwise.
-    alloc_buf->set_word_size(gclab_word_size);
-    alloc_buf->set_buf(buf);
-
-    HeapWord* const obj = alloc_buf->allocate(word_sz);
-    assert(obj != NULL, "buffer was definitely big enough...");
-    return obj;
-  } else {
-    return _allocator->par_allocate_during_gc(dest, word_sz, context);
+    *plab_refill_failed = true;
   }
+  // Try inline allocation.
+  return _allocator->par_allocate_during_gc(dest, word_sz, context);
 }

 void G1PLABAllocator::undo_allocation(InCSetState dest, HeapWord* obj, size_t word_sz, AllocationContext_t context) {
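The survivor_attempt_allocation and old_attempt_allocation hunks above introduce a sticky per-GC "is full" flag: once an allocation attempt under FreeList_lock has failed, every later attempt in the same GC fails fast instead of re-taking the lock only to fail again. A minimal standalone sketch of that pattern (illustrative names and types, not HotSpot code):

#include <cstddef>
#include <mutex>

class RegionAllocator {
  std::mutex _lock;   // stands in for FreeList_lock
  char* _cur;
  char* _end;
  bool  _is_full;     // sticky per-GC flag

public:
  RegionAllocator(char* base, size_t bytes)
    : _cur(base), _end(base + bytes), _is_full(false) {}

  // Mirrors the reset done in G1Allocator::init_gc_alloc_regions().
  void init_gc_alloc_regions() { _is_full = false; }

  char* attempt_allocation(size_t bytes) {
    if (_is_full) {
      return NULL;   // fail fast: a previous locked attempt already failed
    }
    std::lock_guard<std::mutex> x(_lock);
    if (_end - _cur < (ptrdiff_t)bytes) {
      _is_full = true;   // remember the failure for the rest of this GC
      return NULL;
    }
    char* result = _cur;
    _cur += bytes;
    return result;
  }
};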
--- old/src/share/vm/gc/g1/g1Allocator.hpp	2015-08-06 16:11:23.063420882 +0200
+++ new/src/share/vm/gc/g1/g1Allocator.hpp	2015-08-06 16:11:22.986418586 +0200
@@ -38,11 +38,20 @@
 // Also keeps track of retained regions across GCs.
 class G1Allocator : public CHeapObj<mtGC> {
   friend class VMStructs;
+private:
+  bool _survivor_is_full;
+  bool _old_is_full;
 protected:
   G1CollectedHeap* _g1h;

   virtual MutatorAllocRegion* mutator_alloc_region(AllocationContext_t context) = 0;

+  virtual bool survivor_is_full(AllocationContext_t context) const;
+  virtual bool old_is_full(AllocationContext_t context) const;
+
+  virtual void set_survivor_full(AllocationContext_t context);
+  virtual void set_old_full(AllocationContext_t context);
+
   // Accessors to the allocation regions.
   virtual SurvivorGCAllocRegion* survivor_gc_alloc_region(AllocationContext_t context) = 0;
   virtual OldGCAllocRegion* old_gc_alloc_region(AllocationContext_t context) = 0;
@@ -54,7 +63,7 @@
   inline HeapWord* old_attempt_allocation(size_t word_size,
                                           AllocationContext_t context);
 public:
-  G1Allocator(G1CollectedHeap* heap) : _g1h(heap) { }
+  G1Allocator(G1CollectedHeap* heap) : _g1h(heap), _survivor_is_full(false), _old_is_full(false) { }
   virtual ~G1Allocator() { }

   static G1Allocator* create_allocator(G1CollectedHeap* g1h);
@@ -66,7 +75,7 @@
   virtual void init_mutator_alloc_region() = 0;
   virtual void release_mutator_alloc_region() = 0;

-  virtual void init_gc_alloc_regions(EvacuationInfo& evacuation_info) = 0;
+  virtual void init_gc_alloc_regions(EvacuationInfo& evacuation_info);
   virtual void release_gc_alloc_regions(EvacuationInfo& evacuation_info) = 0;
   virtual void abandon_gc_alloc_regions() = 0;

@@ -223,12 +232,14 @@

   virtual void waste(size_t& wasted, size_t& undo_wasted) = 0;

-  // Allocate word_sz words in dest, either directly into the regions or by
+  // Allocate word_sz words in dest, either directly into the regions (inline) or by
   // allocating a new PLAB. Returns the address of the allocated memory, NULL if
-  // not successful.
-  HeapWord* allocate_direct_or_new_plab(InCSetState dest,
+  // not successful. Plab_refill_failed indicates whether an attempt to refill the
+  // PLAB failed or not.
+  HeapWord* allocate_inline_or_new_plab(InCSetState dest,
                                         size_t word_sz,
-                                        AllocationContext_t context);
+                                        AllocationContext_t context,
+                                        bool* plab_refill_failed);

   // Allocate word_sz words in the PLAB of dest. Returns the address of the
   // allocated memory, NULL if not successful.
@@ -243,13 +254,15 @@
     }
   }

-  HeapWord* allocate(InCSetState dest, size_t word_sz,
-                     AllocationContext_t context) {
+  HeapWord* allocate(InCSetState dest,
+                     size_t word_sz,
+                     AllocationContext_t context,
+                     bool* refill_failed) {
     HeapWord* const obj = plab_allocate(dest, word_sz, context);
     if (obj != NULL) {
       return obj;
     }
-    return allocate_direct_or_new_plab(dest, word_sz, context);
+    return allocate_inline_or_new_plab(dest, word_sz, context, refill_failed);
   }

   void undo_allocation(InCSetState dest, HeapWord* obj, size_t word_sz, AllocationContext_t context);
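The new bool* out-parameter exists because a NULL result from allocate_inline_or_new_plab is ambiguous on its own: word_sz may simply have been too large to justify refilling a PLAB, which says nothing about remaining space. A self-contained caller-side sketch of the contract (dummy body and names are illustrative, not the HotSpot declarations):

#include <cstddef>
#include <cstdio>

void* allocate_inline_or_new_plab(size_t word_sz, bool* plab_refill_failed) {
  // Dummy stand-in for the PLAB refill / inline allocation attempt.
  if (word_sz > 1024) {
    return NULL;               // large request: inline attempt only,
  }                            // *plab_refill_failed stays false
  *plab_refill_failed = true;  // pretend the refill itself failed
  return NULL;
}

int main() {
  bool plab_refill_failed = false;
  void* obj = allocate_inline_or_new_plab(64, &plab_refill_failed);
  if (obj == NULL && plab_refill_failed) {
    std::printf("refill failed: treat this space as exhausted\n");
  } else if (obj == NULL) {
    std::printf("only a large inline allocation failed: keep the space\n");
  }
  return 0;
}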
--- old/src/share/vm/gc/g1/g1ParScanThreadState.cpp	2015-08-06 16:11:24.422461405 +0200
+++ new/src/share/vm/gc/g1/g1ParScanThreadState.cpp	2015-08-06 16:11:24.345459109 +0200
@@ -41,7 +41,9 @@
     _term_attempts(0),
     _tenuring_threshold(g1h->g1_policy()->tenuring_threshold()),
     _age_table(false), _scanner(g1h, rp),
-    _strong_roots_time(0), _term_time(0) {
+    _strong_roots_time(0), _term_time(0),
+    _last_gen_is_full(false)
+{
   _scanner.set_par_scan_thread_state(this);
   // we allocate G1YoungSurvRateNumRegions plus one entries, since
   // we "sacrifice" entry 0 to keep track of surviving bytes for
@@ -160,26 +162,38 @@
 HeapWord* G1ParScanThreadState::allocate_in_next_plab(InCSetState const state,
                                                       InCSetState* dest,
                                                       size_t word_sz,
-                                                      AllocationContext_t const context) {
+                                                      AllocationContext_t const context,
+                                                      bool previous_plab_refill_failed) {
   assert(state.is_in_cset_or_humongous(), err_msg("Unexpected state: " CSETSTATE_FORMAT, state.value()));
   assert(dest->is_in_cset_or_humongous(), err_msg("Unexpected dest: " CSETSTATE_FORMAT, dest->value()));

   // Right now we only have two types of regions (young / old) so
   // let's keep the logic here simple. We can generalize it when necessary.
   if (dest->is_young()) {
+    bool plab_refill_in_old_failed = false;
     HeapWord* const obj_ptr = _plab_allocator->allocate(InCSetState::Old,
                                                         word_sz,
-                                                        context);
-    if (obj_ptr == NULL) {
-      return NULL;
-    }
+                                                        context,
+                                                        &plab_refill_in_old_failed);
     // Make sure that we won't attempt to copy any other objects out
     // of a survivor region (given that apparently we cannot allocate
-    // any new ones) to avoid coming into this slow path.
-    _tenuring_threshold = 0;
-    dest->set_old();
+    // any new ones) to avoid coming into this slow path again and again.
+    // Only consider failed PLAB refill here: failed inline allocations are
+    // typically large, so not indicative of remaining space.
+    if (previous_plab_refill_failed) {
+      _tenuring_threshold = 0;
+    }
+
+    if (obj_ptr != NULL) {
+      dest->set_old();
+    } else {
+      // We just failed to allocate in old gen. The same idea as explained above
+      // for making survivor gen unavailable for allocation applies for old gen.
+      _last_gen_is_full = plab_refill_in_old_failed;
+    }
     return obj_ptr;
   } else {
+    _last_gen_is_full = previous_plab_refill_failed;
     assert(dest->is_old(), err_msg("Unexpected dest: " CSETSTATE_FORMAT, dest->value()));
     // no other space to try.
     return NULL;
@@ -210,14 +224,20 @@
   uint age = 0;
   InCSetState dest_state = next_state(state, old_mark, age);
+  // The second clause is to prevent premature evacuation failure in case there
+  // is still space in survivor, but old gen is full.
+  if (_last_gen_is_full && dest_state.is_old()) {
+    return handle_evacuation_failure_par(old, old_mark);
+  }
   HeapWord* obj_ptr = _plab_allocator->plab_allocate(dest_state, word_sz, context);

   // PLAB allocations should succeed most of the time, so we'll
   // normally check against NULL once and that's it.
   if (obj_ptr == NULL) {
-    obj_ptr = _plab_allocator->allocate_direct_or_new_plab(dest_state, word_sz, context);
+    bool plab_refill_failed = false;
+    obj_ptr = _plab_allocator->allocate_inline_or_new_plab(dest_state, word_sz, context, &plab_refill_failed);
     if (obj_ptr == NULL) {
-      obj_ptr = allocate_in_next_plab(state, &dest_state, word_sz, context);
+      obj_ptr = allocate_in_next_plab(state, &dest_state, word_sz, context, plab_refill_failed);
       if (obj_ptr == NULL) {
         // This will either forward-to-self, or detect that someone else has
         // installed a forwarding pointer.
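Taken together, the g1ParScanThreadState.cpp hunks build the following fallback chain for each copied object: PLAB allocation, then PLAB refill or inline allocation, then the same sequence in the next generation, then evacuation failure; the new _last_gen_is_full check short-circuits the whole cascade once a PLAB refill in old gen has failed. A condensed standalone sketch of that control flow (illustrative code, not the HotSpot implementation; the real code also updates the destination state on fallback):

#include <cstddef>

enum Dest { Young, Old };

struct ScanState {
  bool _last_gen_is_full;
  unsigned _tenuring_threshold;

  ScanState() : _last_gen_is_full(false), _tenuring_threshold(15) {}

  // Stubs standing in for the real allocators.
  void* plab_allocate(Dest, size_t) { return NULL; }
  void* allocate_inline_or_new_plab(Dest, size_t, bool* refill_failed) {
    *refill_failed = true;
    return NULL;
  }
  void* handle_evacuation_failure() { return NULL; }

  void* allocate_in_next_plab(Dest dest, size_t word_sz, bool prev_refill_failed) {
    if (dest == Young) {
      // Only a failed refill (not a failed large inline allocation)
      // says survivor space is really exhausted.
      if (prev_refill_failed) {
        _tenuring_threshold = 0;  // stop routing objects to survivor
      }
      bool refill_in_old_failed = false;
      void* obj = allocate_inline_or_new_plab(Old, word_sz, &refill_in_old_failed);
      if (obj == NULL) {
        _last_gen_is_full = refill_in_old_failed;  // remember: old gen is full
      }
      return obj;
    }
    _last_gen_is_full = prev_refill_failed;  // already in old gen: no fallback left
    return NULL;
  }

  void* copy_to_survivor_space(Dest dest, size_t word_sz) {
    if (_last_gen_is_full && dest == Old) {
      return handle_evacuation_failure();  // the new fast-fail path
    }
    void* obj = plab_allocate(dest, word_sz);
    if (obj == NULL) {
      bool refill_failed = false;
      obj = allocate_inline_or_new_plab(dest, word_sz, &refill_failed);
      if (obj == NULL) {
        obj = allocate_in_next_plab(dest, word_sz, refill_failed);
        if (obj == NULL) {
          obj = handle_evacuation_failure();
        }
      }
    }
    return obj;
  }
};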
--- old/src/share/vm/gc/g1/g1ParScanThreadState.hpp	2015-08-06 16:11:24.866474645 +0200
+++ new/src/share/vm/gc/g1/g1ParScanThreadState.hpp	2015-08-06 16:11:24.790472379 +0200
@@ -71,6 +71,10 @@
   // this points into the array, as we use the first few entries for padding
   size_t* _surviving_young_words;

+  // Indicates whether the last generation (old) has no more space
+  // available for allocation.
+  bool _last_gen_is_full;
+
 #define PADDING_ELEM_NUM (DEFAULT_CACHE_LINE_SIZE / sizeof(size_t))

   DirtyCardQueue& dirty_card_queue()             { return _dcq;  }
@@ -188,12 +192,16 @@

   // Tries to allocate word_sz in the PLAB of the next "generation" after trying to
   // allocate into dest. State is the original (source) cset state for the object
-  // that is allocated for.
+  // that is allocated for. Previous_plab_refill_failed indicates whether a previous
+  // PLAB refill into "state" failed.
   // Returns a non-NULL pointer if successful, and updates dest if required.
+  // Also determines whether we should continue trying to allocate into the various
+  // generations or just stop trying to allocate.
   HeapWord* allocate_in_next_plab(InCSetState const state,
                                   InCSetState* dest,
                                   size_t word_sz,
-                                  AllocationContext_t const context);
+                                  AllocationContext_t const context,
+                                  bool previous_plab_refill_failed);

   inline InCSetState next_state(InCSetState const state, markOop const m, uint& age);
 public:
--- old/src/share/vm/gc/g1/heapRegionSet.hpp	2015-08-06 16:11:25.312487944 +0200
+++ new/src/share/vm/gc/g1/heapRegionSet.hpp	2015-08-06 16:11:25.235485648 +0200
@@ -121,7 +121,7 @@

   uint length() const { return _count.length(); }

-  bool is_empty() { return _count.length() == 0; }
+  bool is_empty() const { return _count.length() == 0; }

   size_t total_capacity_bytes() {
     return _count.capacity();
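The heapRegionSet.hpp hunk is a small const-correctness fix: without the qualifier, is_empty() cannot be called through a const reference or from another const member function. A tiny illustrative sketch (stand-in types, not HotSpot code):

#include <cassert>

class RegionSet {
  unsigned _length;
public:
  RegionSet() : _length(0) {}
  unsigned length() const { return _length; }
  bool is_empty() const   { return _length == 0; }  // const, as in the patch
};

void verify(const RegionSet& set) {  // only const members are callable here
  assert(set.is_empty() || set.length() > 0);
}

int main() {
  RegionSet set;
  verify(set);  // compiles only because is_empty() is const
  return 0;
}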