/*
 * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/g1Allocator.inline.hpp"
#include "gc/g1/g1AllocRegion.inline.hpp"
#include "gc/g1/g1EvacStats.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectorPolicy.hpp"
#include "gc/g1/g1MarkSweep.hpp"
#include "gc/g1/vm_operations_g1.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionSet.inline.hpp"
#include "gc/shared/gcLocker.hpp"
#include "runtime/vmThread.hpp"

G1DefaultAllocator::G1DefaultAllocator(G1CollectedHeap* heap) :
  G1Allocator(heap),
  _survivor_is_full(false),
  _old_is_full(false),
  _retained_old_gc_alloc_region(NULL),
  _survivor_gc_alloc_region(heap->alloc_buffer_stats(InCSetState::Young)),
  _old_gc_alloc_region(heap->alloc_buffer_stats(InCSetState::Old)) {
}

void G1DefaultAllocator::init_mutator_alloc_region() {
  assert(_mutator_alloc_region.get() == NULL, "pre-condition");
  _mutator_alloc_region.init();
}

void G1DefaultAllocator::release_mutator_alloc_region() {
  _mutator_alloc_region.release();
  assert(_mutator_alloc_region.get() == NULL, "post-condition");
}

void G1Allocator::reuse_retained_old_region(EvacuationInfo& evacuation_info,
                                            OldGCAllocRegion* old,
                                            HeapRegion** retained_old) {
  HeapRegion* retained_region = *retained_old;
  *retained_old = NULL;
  assert(retained_region == NULL || !retained_region->is_archive(),
         "Archive region should not be alloc region (index %u)", retained_region->hrm_index());

  // We will discard the current GC alloc region if:
  // a) it's in the collection set (it can happen!),
  // b) it's already full (no point in using it),
  // c) it's empty (this means that it was emptied during
  // a cleanup and it should be on the free list now), or
  // d) it's humongous (this means that it was emptied
  // during a cleanup and was added to the free list, but
  // has been subsequently used to allocate a humongous
  // object that may be less than the region size).
  if (retained_region != NULL &&
      !retained_region->in_collection_set() &&
      !(retained_region->top() == retained_region->end()) &&
      !retained_region->is_empty() &&
      !retained_region->is_humongous()) {
    retained_region->record_timestamp();
    // The retained region was added to the old region set when it was
    // retired. We have to remove it now, since we don't allow regions
    // we allocate to in the region sets. We'll re-add it later, when
    // it's retired again.
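    // Note the ordering of the calls below: remove the region from the
    // old set, record the copy phase, install it as the active old GC
    // alloc region, and only then update the evacuation info with the
    // bytes already used in the retained region.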
    _g1h->old_set_remove(retained_region);
    bool during_im = _g1h->collector_state()->during_initial_mark_pause();
    retained_region->note_start_of_copying(during_im);
    old->set(retained_region);
    _g1h->hr_printer()->reuse(retained_region);
    evacuation_info.set_alloc_regions_used_before(retained_region->used());
  }
}

void G1DefaultAllocator::init_gc_alloc_regions(EvacuationInfo& evacuation_info) {
  assert_at_safepoint(true /* should_be_vm_thread */);

  _survivor_is_full = false;
  _old_is_full = false;

  _survivor_gc_alloc_region.init();
  _old_gc_alloc_region.init();
  reuse_retained_old_region(evacuation_info,
                            &_old_gc_alloc_region,
                            &_retained_old_gc_alloc_region);
}

void G1DefaultAllocator::release_gc_alloc_regions(EvacuationInfo& evacuation_info) {
  AllocationContext_t context = AllocationContext::current();
  evacuation_info.set_allocation_regions(survivor_gc_alloc_region(context)->count() +
                                         old_gc_alloc_region(context)->count());
  survivor_gc_alloc_region(context)->release();
  // If we have an old GC alloc region to release, we'll save it in
  // _retained_old_gc_alloc_region. If we don't,
  // _retained_old_gc_alloc_region will become NULL. This is what we
  // want either way so no reason to check explicitly for either
  // condition.
  _retained_old_gc_alloc_region = old_gc_alloc_region(context)->release();
  if (_retained_old_gc_alloc_region != NULL) {
    _retained_old_gc_alloc_region->record_retained_region();
  }
}

void G1DefaultAllocator::abandon_gc_alloc_regions() {
  assert(survivor_gc_alloc_region(AllocationContext::current())->get() == NULL, "pre-condition");
  assert(old_gc_alloc_region(AllocationContext::current())->get() == NULL, "pre-condition");
  _retained_old_gc_alloc_region = NULL;
}

bool G1DefaultAllocator::survivor_is_full(AllocationContext_t context) const {
  return _survivor_is_full;
}

bool G1DefaultAllocator::old_is_full(AllocationContext_t context) const {
  return _old_is_full;
}

void G1DefaultAllocator::set_survivor_full(AllocationContext_t context) {
  _survivor_is_full = true;
}

void G1DefaultAllocator::set_old_full(AllocationContext_t context) {
  _old_is_full = true;
}

G1PLAB::G1PLAB(size_t gclab_word_size) :
  PLAB(gclab_word_size), _retired(true) { }

size_t G1Allocator::unsafe_max_tlab_alloc(AllocationContext_t context) {
  // Return the remaining space in the current alloc region, but not less
  // than the min TLAB size.
  // Also, this value can be at most the humongous object threshold,
  // since we can't allow TLABs to grow big enough to accommodate
  // humongous objects.
  HeapRegion* hr = mutator_alloc_region(context)->get();
  size_t max_tlab = _g1h->max_tlab_size() * wordSize;
  if (hr == NULL) {
    return max_tlab;
  } else {
    return MIN2(MAX2(hr->free(), (size_t) MinTLABSize), max_tlab);
  }
}

inline bool G1Allocator::is_humongous(size_t word_size) {
  return G1CollectedHeap::is_humongous(word_size);
}

HeapWord* G1Allocator::attempt_allocation_slow(size_t word_size,
                                               AllocationContext_t context,
                                               uint* gc_count_before_ret,
                                               uint* gclocker_retry_count_ret) {
  // Make sure you read the note in attempt_allocation_humongous().

  assert_heap_not_locked_and_not_at_safepoint();
  assert(!is_humongous(word_size), "attempt_allocation_slow() should not "
         "be called for humongous allocation requests");

  // We should only get here after the first-level allocation attempt
  // (attempt_allocation()) failed to allocate.

  // We will loop until a) we manage to successfully perform the
  // allocation or b) we successfully schedule a collection which
  // fails to perform the allocation. b) is the only case when we'll
  // return NULL.
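  //
  // Each iteration works roughly as follows: take the Heap_lock and
  // retry the allocation; if that fails, either schedule a collection
  // pause (using the GC count read under the lock to detect races with
  // other threads scheduling a pause) or stall on the GC locker; then
  // make one more lock-free allocation attempt before looping around.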
  HeapWord* result = NULL;
  for (int try_count = 1; /* we'll return */; try_count += 1) {
    bool should_try_gc;
    uint gc_count_before;

    {
      MutexLockerEx x(Heap_lock);
      result = attempt_allocation_locked(word_size, context);
      if (result != NULL) {
        return result;
      }

      if (GCLocker::is_active_and_needs_gc()) {
        if (_g1h->g1_policy()->can_expand_young_list()) {
          // No need for an ergo verbose message here,
          // can_expand_young_list() does this when it returns true.
          result = attempt_allocation_force(word_size, context);
          if (result != NULL) {
            return result;
          }
        }
        should_try_gc = false;
      } else {
        // The GCLocker may not be active but the GCLocker initiated
        // GC may not yet have been performed (GCLocker::needs_gc()
        // returns true). In this case we do not try this GC and
        // wait until the GCLocker initiated GC is performed, and
        // then retry the allocation.
        if (GCLocker::needs_gc()) {
          should_try_gc = false;
        } else {
          // Read the GC count while still holding the Heap_lock.
          gc_count_before = _g1h->total_collections();
          should_try_gc = true;
        }
      }
    }

    if (should_try_gc) {
      bool succeeded;
      result = _g1h->do_collection_pause(word_size, gc_count_before, &succeeded,
                                         GCCause::_g1_inc_collection_pause);
      if (result != NULL) {
        assert(succeeded, "only way to get back a non-NULL result");
        return result;
      }

      if (succeeded) {
        // If we get here we successfully scheduled a collection which
        // failed to allocate. No point in trying to allocate
        // further. We'll just return NULL.
        MutexLockerEx x(Heap_lock);
        *gc_count_before_ret = _g1h->total_collections();
        return NULL;
      }
    } else {
      if (*gclocker_retry_count_ret > GCLockerRetryAllocationCount) {
        MutexLockerEx x(Heap_lock);
        *gc_count_before_ret = _g1h->total_collections();
        return NULL;
      }
      // The GCLocker is either active or the GCLocker initiated
      // GC has not yet been performed. Stall until it is and
      // then retry the allocation.
      GCLocker::stall_until_clear();
      (*gclocker_retry_count_ret) += 1;
    }

    // We can reach here if we were unsuccessful in scheduling a
    // collection (because another thread beat us to it) or if we were
    // stalled due to the GC locker. In either case we should retry the
    // allocation attempt in case another thread successfully
    // performed a collection and reclaimed enough space. We do the
    // first attempt (without holding the Heap_lock) here and the
    // follow-on attempt will be at the start of the next loop
    // iteration (after taking the Heap_lock).
    result = attempt_allocation(word_size, context);
    if (result != NULL) {
      return result;
    }

    // Give a warning if we seem to be looping forever.
    if ((QueuedAllocationWarningCount > 0) &&
        (try_count % QueuedAllocationWarningCount == 0)) {
      warning("G1Allocator::attempt_allocation_slow() "
              "retries %d times", try_count);
    }
  }

  ShouldNotReachHere();
  return NULL;
}

HeapWord* G1Allocator::humongous_obj_allocate_initialize_regions(uint first,
                                                                 uint num_regions,
                                                                 size_t word_size,
                                                                 AllocationContext_t context) {
  assert(first != G1_NO_HRM_INDEX, "pre-condition");
  assert(is_humongous(word_size), "word_size should be humongous");
  assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");

  // Index of last region in the series.
  uint last = first + num_regions - 1;

  // We need to initialize the region(s) we just discovered. This is
  // a bit tricky given that it can happen concurrently with
  // refinement threads refining cards on these regions and
  // potentially wanting to refine the BOT as they are scanning
  // those cards (this can happen shortly after a cleanup; see CR
  // 6991377). So we have to set up the region(s) carefully and in
  // a specific order.
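  //
  // Specifically, the code below does the following, in order:
  //   1. Zero the klass word of the new object, so any concurrent
  //      scanner that reaches this memory bails out.
  //   2. Set up the region types ("starts humongous" / "continues
  //      humongous") and the BOT.
  //   3. Issue a storestore barrier.
  //   4. Only then publish the regions' top fields.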

  // The word size sum of all the regions we will allocate.
  size_t word_size_sum = (size_t) num_regions * HeapRegion::GrainWords;
  assert(word_size <= word_size_sum, "sanity");

  // This will be the "starts humongous" region.
  HeapRegion* first_hr = _g1h->region_at(first);
  // The header of the new object will be placed at the bottom of
  // the first region.
  HeapWord* new_obj = first_hr->bottom();
  // This will be the new top of the new object.
  HeapWord* obj_top = new_obj + word_size;

  // First, we need to zero the header of the space that we will be
  // allocating. When we update top further down, some refinement
  // threads might try to scan the region. By zeroing the header we
  // ensure that any thread that will try to scan the region will
  // come across the zero klass word and bail out.
  //
  // NOTE: It would not have been correct to have used
  // CollectedHeap::fill_with_object() and make the space look like
  // an int array. The thread that is doing the allocation will
  // later update the object header to a potentially different array
  // type and, for a very short period of time, the klass and length
  // fields will be inconsistent. This could cause a refinement
  // thread to calculate the object size incorrectly.
  Copy::fill_to_words(new_obj, oopDesc::header_size(), 0);

  // How many words we use for filler objects.
  size_t word_fill_size = word_size_sum - word_size;

  // How many words of memory we "waste" because they cannot hold a filler object.
  size_t words_not_fillable = 0;

  if (word_fill_size >= _g1h->min_fill_size()) {
    _g1h->fill_with_objects(obj_top, word_fill_size);
  } else if (word_fill_size > 0) {
    // We have space to fill, but we cannot fit an object there.
    words_not_fillable = word_fill_size;
    word_fill_size = 0;
  }

  // We will set up the first region as "starts humongous". This
  // will also update the BOT covering all the regions to reflect
  // that there is a single object that starts at the bottom of the
  // first region.
  first_hr->set_starts_humongous(obj_top, word_fill_size);
  first_hr->set_allocation_context(context);

  // Then, if there are any, we will set up the "continues
  // humongous" regions.
  HeapRegion* hr = NULL;
  for (uint i = first + 1; i <= last; ++i) {
    hr = _g1h->region_at(i);
    hr->set_continues_humongous(first_hr);
    hr->set_allocation_context(context);
  }

  // Up to this point no concurrent thread would have been able to
  // do any scanning on any region in this series. All the top
  // fields still point to bottom, so the intersection between
  // [bottom,top] and [card_start,card_end] will be empty. Before we
  // update the top fields, we'll do a storestore to make sure that
  // no thread sees the update to top before the zeroing of the
  // object header and the BOT initialization.
  OrderAccess::storestore();

  // Now, we will update the top fields of all the regions in the
  // series except the last one.
  for (uint i = first; i < last; ++i) {
    hr = _g1h->region_at(i);
    hr->set_top(hr->end());
  }

  hr = _g1h->region_at(last);
  // If we cannot fit a filler object, we must set top to the end
  // of the humongous object, otherwise we cannot iterate the heap
  // and the BOT will not be complete.
  hr->set_top(hr->end() - words_not_fillable);

  assert(hr->bottom() < obj_top && obj_top <= hr->end(),
         "obj_top should be in last region");

  _g1h->verifier()->check_bitmaps("Humongous Region Allocation", first_hr);

  assert(words_not_fillable == 0 ||
         first_hr->bottom() + word_size_sum - words_not_fillable == hr->top(),
         "Miscalculation in humongous allocation");

  _g1h->increase_used((word_size_sum - words_not_fillable) * HeapWordSize);

  for (uint i = first; i <= last; ++i) {
    hr = _g1h->region_at(i);
    _g1h->_humongous_set.add(hr);
    _g1h->hr_printer()->alloc(hr);
  }

  return new_obj;
}

size_t G1Allocator::humongous_obj_size_in_regions(size_t word_size) {
  assert(is_humongous(word_size), "Object of size " SIZE_FORMAT " must be humongous here", word_size);
  return align_size_up_(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords;
}

HeapWord* G1Allocator::attempt_allocation_at_safepoint(size_t word_size,
                                                       AllocationContext_t context,
                                                       bool expect_null_mutator_alloc_region) {
  assert_at_safepoint(true /* should_be_vm_thread */);
  assert(!has_mutator_alloc_region(context) || !expect_null_mutator_alloc_region,
         "the current alloc region was unexpectedly found to be non-NULL");

  if (!is_humongous(word_size)) {
    return attempt_allocation_locked(word_size, context);
  } else {
    HeapWord* result = humongous_obj_allocate(word_size, context);
    if (result != NULL && _g1h->g1_policy()->need_to_start_conc_mark("STW humongous allocation")) {
      _g1h->collector_state()->set_initiate_conc_mark_if_possible(true);
    }
    return result;
  }

  ShouldNotReachHere();
}

// If the object could fit into free regions without expansion, try that.
// Otherwise, if we can expand, do so.
// Otherwise, if using ex regions might help, try with ex regions given back.
HeapWord* G1Allocator::humongous_obj_allocate(size_t word_size, AllocationContext_t context) {
  assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);

  _g1h->verifier()->verify_region_sets_optional();

  uint first = G1_NO_HRM_INDEX;
  uint obj_regions = (uint) humongous_obj_size_in_regions(word_size);

  if (obj_regions == 1) {
    // Only one region to allocate, try to use a fast path by directly allocating
    // from the free lists. Do not try to expand here, we will potentially do that
    // later.
    HeapRegion* hr = _g1h->new_region(word_size, true /* is_old */, false /* do_expand */);
    if (hr != NULL) {
      first = hr->hrm_index();
    }
  } else {
    // We can't allocate a humongous object spanning more than one region while
    // cleanupComplete() is running, since some of the regions we find to be
    // empty might not yet be added to the free list. It is not straightforward
    // to know which list they are on so that we can remove them. We only
    // need to do this if we need to allocate more than one region to satisfy the
    // current humongous allocation request. If we are only allocating one region
    // we use the one-region region allocation code (see above), which already
    // potentially waits for regions from the secondary free list.
    _g1h->wait_while_free_regions_coming();
    _g1h->append_secondary_free_list_if_not_empty_with_lock();

    // Policy: Try only empty regions (i.e. already committed) first. Maybe we
    // are lucky enough to find some.
    first = _g1h->_hrm.find_contiguous_only_empty(obj_regions);
    if (first != G1_NO_HRM_INDEX) {
      _g1h->_hrm.allocate_free_regions_starting_at(first, obj_regions);
    }
  }

  if (first == G1_NO_HRM_INDEX) {
    // Policy: We could not find enough regions for the humongous object in the
    // free list. Look through the heap to find a mix of free and uncommitted regions.
    // If we find some, try expansion.
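    // Note: find_contiguous_empty_or_unavailable() may return a range that
    // includes uncommitted ("unavailable") regions, so a successful find
    // here may still require committing memory (via expand_at()) below.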
    first = _g1h->_hrm.find_contiguous_empty_or_unavailable(obj_regions);
    if (first != G1_NO_HRM_INDEX) {
      // We found something. Make sure these regions are committed, i.e. expand
      // the heap. Alternatively we could do a defragmentation GC.
      log_debug(gc, ergo, heap)("Attempt heap expansion (humongous allocation request failed). Allocation request: " SIZE_FORMAT "B",
                                word_size * HeapWordSize);

      _g1h->_hrm.expand_at(first, obj_regions);
      _g1h->g1_policy()->record_new_heap_size(_g1h->num_regions());

#ifdef ASSERT
      for (uint i = first; i < first + obj_regions; ++i) {
        HeapRegion* hr = _g1h->region_at(i);
        assert(hr->is_free(), "sanity");
        assert(hr->is_empty(), "sanity");
        assert(_g1h->is_on_master_free_list(hr), "sanity");
      }
#endif
      _g1h->_hrm.allocate_free_regions_starting_at(first, obj_regions);
    } else {
      // Policy: Potentially trigger a defragmentation GC.
    }
  }

  HeapWord* result = NULL;
  if (first != G1_NO_HRM_INDEX) {
    result = humongous_obj_allocate_initialize_regions(first, obj_regions, word_size, context);
    assert(result != NULL, "it should always return a valid result");

    // A successful humongous object allocation changes the used space
    // information of the old generation so we need to recalculate the
    // sizes and update the jstat counters here.
    _g1h->g1mm()->update_sizes();
  }

  _g1h->verifier()->verify_region_sets_optional();

  return result;
}

HeapWord* G1Allocator::attempt_allocation_humongous(size_t word_size,
                                                    uint* gc_count_before_ret,
                                                    uint* gclocker_retry_count_ret) {
  // The structure of this method has a lot of similarities to
  // attempt_allocation_slow(). The reason these two were not merged
  // into a single one is that such a method would require several "if
  // allocation is not humongous do this, otherwise do that" conditional
  // paths which would obscure its flow. In fact, an early version of
  // this code did use a unified method which was harder to follow and,
  // as a result, it had subtle bugs that were hard to track down. So
  // keeping these two methods separate allows each to be more readable.
  // It will be good to keep these two in sync as much as possible.

  assert_heap_not_locked_and_not_at_safepoint();
  assert(is_humongous(word_size), "attempt_allocation_humongous() "
         "should only be called for humongous allocations");

  // Humongous objects can exhaust the heap quickly, so we should check if we
  // need to start a marking cycle at each humongous object allocation. We do
  // the check before we do the actual allocation. The reason for doing it
  // before the allocation is that we avoid having to keep track of the newly
  // allocated memory while we do a GC.
  if (_g1h->g1_policy()->need_to_start_conc_mark("concurrent humongous allocation",
                                                 word_size)) {
    _g1h->collect(GCCause::_g1_humongous_allocation);
  }

  // We will loop until a) we manage to successfully perform the
  // allocation or b) we successfully schedule a collection which
  // fails to perform the allocation. b) is the only case when we'll
  // return NULL.
  HeapWord* result = NULL;
  for (int try_count = 1; /* we'll return */; try_count += 1) {
    bool should_try_gc;
    uint gc_count_before;

    {
      MutexLockerEx x(Heap_lock);

      // Given that humongous objects are not allocated in young
      // regions, we'll first try to do the allocation without doing a
      // collection hoping that there's enough space in the heap.
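      // On success we also report the allocated bytes to the policy as
      // old-generation allocation, since humongous regions are accounted
      // against the old generation.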
      result = humongous_obj_allocate(word_size, AllocationContext::current());
      if (result != NULL) {
        size_t size_in_regions = humongous_obj_size_in_regions(word_size);
        _g1h->g1_policy()->add_bytes_allocated_in_old_since_last_gc(size_in_regions * HeapRegion::GrainBytes);
        return result;
      }

      if (GCLocker::is_active_and_needs_gc()) {
        should_try_gc = false;
      } else {
        // The GCLocker may not be active but the GCLocker initiated
        // GC may not yet have been performed (GCLocker::needs_gc()
        // returns true). In this case we do not try this GC and
        // wait until the GCLocker initiated GC is performed, and
        // then retry the allocation.
        if (GCLocker::needs_gc()) {
          should_try_gc = false;
        } else {
          // Read the GC count while still holding the Heap_lock.
          gc_count_before = _g1h->total_collections();
          should_try_gc = true;
        }
      }
    }

    if (should_try_gc) {
      // If we failed to allocate the humongous object, we should try to
      // do a collection pause (if we're allowed) in case it reclaims
      // enough space for the allocation to succeed after the pause.
      bool succeeded;
      result = _g1h->do_collection_pause(word_size, gc_count_before, &succeeded,
                                         GCCause::_g1_humongous_allocation);
      if (result != NULL) {
        assert(succeeded, "only way to get back a non-NULL result");
        return result;
      }

      if (succeeded) {
        // If we get here we successfully scheduled a collection which
        // failed to allocate. No point in trying to allocate
        // further. We'll just return NULL.
        MutexLockerEx x(Heap_lock);
        *gc_count_before_ret = _g1h->total_collections();
        return NULL;
      }
    } else {
      if (*gclocker_retry_count_ret > GCLockerRetryAllocationCount) {
        MutexLockerEx x(Heap_lock);
        *gc_count_before_ret = _g1h->total_collections();
        return NULL;
      }
      // The GCLocker is either active or the GCLocker initiated
      // GC has not yet been performed. Stall until it is and
      // then retry the allocation.
      GCLocker::stall_until_clear();
      (*gclocker_retry_count_ret) += 1;
    }

    // We can reach here if we were unsuccessful in scheduling a
    // collection (because another thread beat us to it) or if we were
    // stalled due to the GC locker. In either case we should retry the
    // allocation attempt in case another thread successfully
    // performed a collection and reclaimed enough space.
    // Give a warning if we seem to be looping forever.
    if ((QueuedAllocationWarningCount > 0) &&
        (try_count % QueuedAllocationWarningCount == 0)) {
      warning("G1Allocator::attempt_allocation_humongous() "
              "retries %d times", try_count);
    }
  }

  ShouldNotReachHere();
  return NULL;
}

HeapWord* G1Allocator::mem_allocate(size_t word_size,
                                    bool* gc_overhead_limit_was_exceeded) {
  assert_heap_not_locked_and_not_at_safepoint();

  // Loop until the allocation is satisfied, or unsatisfied after GC.
  for (uint try_count = 1, gclocker_retry_count = 0; /* we'll return */; try_count += 1) {
    uint gc_count_before;

    HeapWord* result = NULL;
    if (!is_humongous(word_size)) {
      result = attempt_allocation(word_size, &gc_count_before, &gclocker_retry_count);
    } else {
      result = attempt_allocation_humongous(word_size, &gc_count_before, &gclocker_retry_count);
    }
    if (result != NULL) {
      return result;
    }

    // Create the garbage collection operation...
    VM_G1CollectForAllocation op(gc_count_before, word_size);
    op.set_allocation_context(AllocationContext::current());

    // ...and get the VM thread to execute it.
    VMThread::execute(&op);

    if (op.prologue_succeeded() && op.pause_succeeded()) {
      // If the operation was successful we'll return the result even
      // if it is NULL. If the allocation attempt failed immediately
      // after a Full GC, it's unlikely we'll be able to allocate now.
      HeapWord* result = op.result();
      if (result != NULL && !is_humongous(word_size)) {
        // Allocations that take place on VM operations do not do any
        // card dirtying and we have to do it here. We only have to do
        // this for non-humongous allocations, though.
        _g1h->dirty_young_block(result, word_size);
      }
      return result;
    } else {
      if (gclocker_retry_count > GCLockerRetryAllocationCount) {
        return NULL;
      }
      assert(op.result() == NULL,
             "the result should be NULL if the VM op did not succeed");
    }

    // Give a warning if we seem to be looping forever.
    if ((QueuedAllocationWarningCount > 0) &&
        (try_count % QueuedAllocationWarningCount == 0)) {
      warning("G1Allocator::mem_allocate retries %d times", try_count);
    }
  }

  ShouldNotReachHere();
  return NULL;
}

#ifndef PRODUCT
void G1Allocator::allocate_dummy_regions(size_t word_size) {
  // The word size is chosen so that the region we'll allocate will be humongous.
  guarantee(is_humongous(word_size), "sanity");

  for (uintx i = 0; i < G1DummyRegionsPerGC; ++i) {
    // Let's use the existing mechanism for the allocation
    HeapWord* dummy_obj = humongous_obj_allocate(word_size, AllocationContext::system());
    if (dummy_obj != NULL) {
      MemRegion mr(dummy_obj, word_size);
      CollectedHeap::fill_with_object(mr);
    } else {
      // If we can't allocate once, we probably cannot allocate
      // again. Let's get out of the loop.
      break;
    }
  }
}
#endif // !PRODUCT

inline HeapWord* G1Allocator::attempt_allocation(size_t word_size,
                                                 uint* gc_count_before_ret,
                                                 uint* gclocker_retry_count_ret) {
  assert_heap_not_locked_and_not_at_safepoint();
  assert(!is_humongous(word_size), "attempt_allocation() should not "
         "be called for humongous allocation requests");

  AllocationContext_t context = AllocationContext::current();
  HeapWord* result = attempt_allocation(word_size, context);

  if (result == NULL) {
    result = attempt_allocation_slow(word_size,
                                     context,
                                     gc_count_before_ret,
                                     gclocker_retry_count_ret);
  }
  assert_heap_not_locked();
  if (result != NULL) {
    _g1h->dirty_young_block(result, word_size);
  }
  return result;
}

HeapWord* G1Allocator::allocate_new_tlab(size_t word_size) {
  assert_heap_not_locked_and_not_at_safepoint();
  assert(!is_humongous(word_size), "we do not allow humongous TLABs");

  uint dummy_gc_count_before;
  uint dummy_gclocker_retry_count = 0;
  return attempt_allocation(word_size, &dummy_gc_count_before, &dummy_gclocker_retry_count);
}

HeapWord* G1Allocator::par_allocate_during_gc(InCSetState dest,
                                              size_t word_size,
                                              AllocationContext_t context) {
  size_t temp = 0;
  HeapWord* result = par_allocate_during_gc(dest, word_size, word_size, &temp, context);
  assert(result == NULL || temp == word_size,
         "Requested " SIZE_FORMAT " words, but got " SIZE_FORMAT " at " PTR_FORMAT,
         word_size, temp, p2i(result));
  return result;
}

HeapWord* G1Allocator::par_allocate_during_gc(InCSetState dest,
                                              size_t min_word_size,
                                              size_t desired_word_size,
                                              size_t* actual_word_size,
                                              AllocationContext_t context) {
  switch (dest.value()) {
    case InCSetState::Young:
      return survivor_attempt_allocation(min_word_size, desired_word_size, actual_word_size, context);
    case InCSetState::Old:
      return old_attempt_allocation(min_word_size, desired_word_size, actual_word_size, context);
    default:
      ShouldNotReachHere();
      return NULL; // Keep some compilers happy
  }
}

HeapWord* G1Allocator::survivor_attempt_allocation(size_t min_word_size,
                                                   size_t desired_word_size,
                                                   size_t* actual_word_size,
                                                   AllocationContext_t context) {
  assert(!is_humongous(desired_word_size),
         "we should not be seeing humongous-size allocations in this path");
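  // Two-level attempt: first a lock-free allocation out of the current
  // survivor alloc region, then, under the FreeList_lock, an attempt that
  // may also allocate a fresh region. If both fail we mark the survivor
  // space as full so later allocations skip the locked retry.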
  HeapWord* result = survivor_gc_alloc_region(context)->attempt_allocation(min_word_size,
                                                                           desired_word_size,
                                                                           actual_word_size,
                                                                           false /* bot_updates */);
  if (result == NULL && !survivor_is_full(context)) {
    MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
    result = survivor_gc_alloc_region(context)->attempt_allocation_locked(min_word_size,
                                                                          desired_word_size,
                                                                          actual_word_size,
                                                                          false /* bot_updates */);
    if (result == NULL) {
      set_survivor_full(context);
    }
  }
  if (result != NULL) {
    _g1h->dirty_young_block(result, *actual_word_size);
  }
  return result;
}

HeapWord* G1Allocator::old_attempt_allocation(size_t min_word_size,
                                              size_t desired_word_size,
                                              size_t* actual_word_size,
                                              AllocationContext_t context) {
  assert(!is_humongous(desired_word_size),
         "we should not be seeing humongous-size allocations in this path");

  HeapWord* result = old_gc_alloc_region(context)->attempt_allocation(min_word_size,
                                                                      desired_word_size,
                                                                      actual_word_size,
                                                                      true /* bot_updates */);
  if (result == NULL && !old_is_full(context)) {
    MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
    result = old_gc_alloc_region(context)->attempt_allocation_locked(min_word_size,
                                                                     desired_word_size,
                                                                     actual_word_size,
                                                                     true /* bot_updates */);
    if (result == NULL) {
      set_old_full(context);
    }
  }
  return result;
}

G1PLABAllocator::G1PLABAllocator(G1Allocator* allocator) :
  _g1h(G1CollectedHeap::heap()),
  _allocator(allocator),
  _survivor_alignment_bytes(calc_survivor_alignment_bytes()) {
  for (size_t i = 0; i < ARRAY_SIZE(_direct_allocated); i++) {
    _direct_allocated[i] = 0;
  }
}

bool G1PLABAllocator::may_throw_away_buffer(size_t const allocation_word_sz, size_t const buffer_size) const {
  return (allocation_word_sz * 100 < buffer_size * ParallelGCBufferWastePct);
}

HeapWord* G1PLABAllocator::allocate_direct_or_new_plab(InCSetState dest,
                                                       size_t word_sz,
                                                       AllocationContext_t context,
                                                       bool* plab_refill_failed) {
  size_t plab_word_size = G1CollectedHeap::heap()->desired_plab_sz(dest);
  size_t required_in_plab = PLAB::size_required_for_allocation(word_sz);

  // Only get a new PLAB if the allocation fits and it would not waste more than
  // ParallelGCBufferWastePct in the existing buffer.
  if ((required_in_plab <= plab_word_size) &&
      may_throw_away_buffer(required_in_plab, plab_word_size)) {

    G1PLAB* alloc_buf = alloc_buffer(dest, context);
    alloc_buf->retire();

    size_t actual_plab_size = 0;
    HeapWord* buf = _allocator->par_allocate_during_gc(dest,
                                                       required_in_plab,
                                                       plab_word_size,
                                                       &actual_plab_size,
                                                       context);

    assert(buf == NULL || ((actual_plab_size >= required_in_plab) && (actual_plab_size <= plab_word_size)),
           "Requested at minimum " SIZE_FORMAT ", desired " SIZE_FORMAT " words, but got " SIZE_FORMAT " at " PTR_FORMAT,
           required_in_plab, plab_word_size, actual_plab_size, p2i(buf));

    if (buf != NULL) {
      alloc_buf->set_buf(buf, actual_plab_size);

      HeapWord* const obj = alloc_buf->allocate(word_sz);
      assert(obj != NULL, "PLAB should have been big enough, tried to allocate "
                          SIZE_FORMAT " requiring " SIZE_FORMAT " PLAB size " SIZE_FORMAT,
                          word_sz, required_in_plab, plab_word_size);
      return obj;
    }
    // Otherwise.
    *plab_refill_failed = true;
  }
  // Try direct allocation.
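  // The allocation either did not fit in a full-sized PLAB or the PLAB
  // refill failed, so allocate the object directly in the target space
  // and account for it separately from PLAB allocations.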
  HeapWord* result = _allocator->par_allocate_during_gc(dest, word_sz, context);
  if (result != NULL) {
    _direct_allocated[dest.value()] += word_sz;
  }
  return result;
}

void G1PLABAllocator::undo_allocation(InCSetState dest, HeapWord* obj, size_t word_sz, AllocationContext_t context) {
  alloc_buffer(dest, context)->undo_allocation(obj, word_sz);
}

G1DefaultPLABAllocator::G1DefaultPLABAllocator(G1Allocator* allocator) :
  G1PLABAllocator(allocator),
  _surviving_alloc_buffer(_g1h->desired_plab_sz(InCSetState::Young)),
  _tenured_alloc_buffer(_g1h->desired_plab_sz(InCSetState::Old)) {
  for (uint state = 0; state < InCSetState::Num; state++) {
    _alloc_buffers[state] = NULL;
  }
  _alloc_buffers[InCSetState::Young] = &_surviving_alloc_buffer;
  _alloc_buffers[InCSetState::Old]   = &_tenured_alloc_buffer;
}

void G1DefaultPLABAllocator::flush_and_retire_stats() {
  for (uint state = 0; state < InCSetState::Num; state++) {
    G1PLAB* const buf = _alloc_buffers[state];
    if (buf != NULL) {
      G1EvacStats* stats = _g1h->alloc_buffer_stats(state);
      buf->flush_and_retire_stats(stats);
      stats->add_direct_allocated(_direct_allocated[state]);
      _direct_allocated[state] = 0;
    }
  }
}

void G1DefaultPLABAllocator::waste(size_t& wasted, size_t& undo_wasted) {
  wasted = 0;
  undo_wasted = 0;
  for (uint state = 0; state < InCSetState::Num; state++) {
    G1PLAB* const buf = _alloc_buffers[state];
    if (buf != NULL) {
      wasted += buf->waste();
      undo_wasted += buf->undo_waste();
    }
  }
}

G1ArchiveAllocator* G1ArchiveAllocator::create_allocator(G1CollectedHeap* g1h) {
  // Create the archive allocator, and also enable archive object checking
  // in mark-sweep, since we will be creating archive regions.
  G1ArchiveAllocator* result = new G1ArchiveAllocator(g1h);
  G1MarkSweep::enable_archive_object_check();
  return result;
}

bool G1ArchiveAllocator::alloc_new_region() {
  // Allocate the highest free region in the reserved heap,
  // and add it to our list of allocated regions. It is marked
  // archive and added to the old set.
  HeapRegion* hr = _g1h->alloc_highest_free_region();
  if (hr == NULL) {
    return false;
  }
  assert(hr->is_empty(), "expected empty region (index %u)", hr->hrm_index());
  hr->set_archive();
  _g1h->old_set_add(hr);
  _g1h->hr_printer()->alloc(hr);
  _allocated_regions.append(hr);
  _allocation_region = hr;

  // Set up _bottom and _max to begin allocating in the lowest
  // min_region_size'd chunk of the allocated G1 region.
  _bottom = hr->bottom();
  _max = _bottom + HeapRegion::min_region_size_in_words();

  // Tell mark-sweep that objects in this region are not to be marked.
  G1MarkSweep::set_range_archive(MemRegion(_bottom, HeapRegion::GrainWords), true);

  // Since we've modified the old set, call update_sizes.
  _g1h->g1mm()->update_sizes();
  return true;
}

HeapWord* G1ArchiveAllocator::archive_mem_allocate(size_t word_size) {
  assert(word_size != 0, "size must not be zero");
  if (_allocation_region == NULL) {
    if (!alloc_new_region()) {
      return NULL;
    }
  }
  HeapWord* old_top = _allocation_region->top();
  assert(_bottom >= _allocation_region->bottom(),
         "inconsistent allocation state: " PTR_FORMAT " < " PTR_FORMAT,
         p2i(_bottom), p2i(_allocation_region->bottom()));
  assert(_max <= _allocation_region->end(),
         "inconsistent allocation state: " PTR_FORMAT " > " PTR_FORMAT,
         p2i(_max), p2i(_allocation_region->end()));
  assert(_bottom <= old_top && old_top <= _max,
         "inconsistent allocation state: expected "
         PTR_FORMAT " <= " PTR_FORMAT " <= " PTR_FORMAT,
         p2i(_bottom), p2i(old_top), p2i(_max));

  // Allocate the next word_size words in the current allocation chunk.
  // If allocation would cross the _max boundary, insert a filler and begin
  // at the base of the next min_region_size'd chunk. Also advance to the next
  // chunk if we don't yet cross the boundary, but the remainder would be too
  // small to fill.
  HeapWord* new_top = old_top + word_size;
  size_t remainder = pointer_delta(_max, new_top);
  if ((new_top > _max) ||
      ((new_top < _max) && (remainder < CollectedHeap::min_fill_size()))) {
    if (old_top != _max) {
      size_t fill_size = pointer_delta(_max, old_top);
      CollectedHeap::fill_with_object(old_top, fill_size);
      _summary_bytes_used += fill_size * HeapWordSize;
    }
    _allocation_region->set_top(_max);
    old_top = _bottom = _max;

    // Check if we've just used up the last min_region_size'd chunk
    // in the current region, and if so, allocate a new one.
    if (_bottom != _allocation_region->end()) {
      _max = _bottom + HeapRegion::min_region_size_in_words();
    } else {
      if (!alloc_new_region()) {
        return NULL;
      }
      old_top = _allocation_region->bottom();
    }
  }
  _allocation_region->set_top(old_top + word_size);
  _summary_bytes_used += word_size * HeapWordSize;
  return old_top;
}

void G1ArchiveAllocator::complete_archive(GrowableArray<MemRegion>* ranges,
                                          size_t end_alignment_in_bytes) {
  assert((end_alignment_in_bytes >> LogHeapWordSize) < HeapRegion::min_region_size_in_words(),
         "alignment " SIZE_FORMAT " too large", end_alignment_in_bytes);
  assert(is_size_aligned(end_alignment_in_bytes, HeapWordSize),
         "alignment " SIZE_FORMAT " is not HeapWord (%u) aligned", end_alignment_in_bytes, HeapWordSize);

  // If we've allocated nothing, simply return.
  if (_allocation_region == NULL) {
    return;
  }

  // If an end alignment was requested, insert filler objects.
  if (end_alignment_in_bytes != 0) {
    HeapWord* currtop = _allocation_region->top();
    HeapWord* newtop = (HeapWord*)align_pointer_up(currtop, end_alignment_in_bytes);
    size_t fill_size = pointer_delta(newtop, currtop);
    if (fill_size != 0) {
      if (fill_size < CollectedHeap::min_fill_size()) {
        // If the required fill is smaller than we can represent,
        // bump up to the next aligned address. We know we won't exceed the current
        // region boundary because the max supported alignment is smaller than the min
        // region size, and because the allocation code never leaves space smaller than
        // the min_fill_size at the top of the current allocation region.
        newtop = (HeapWord*)align_pointer_up(currtop + CollectedHeap::min_fill_size(),
                                             end_alignment_in_bytes);
        fill_size = pointer_delta(newtop, currtop);
      }
      HeapWord* fill = archive_mem_allocate(fill_size);
      CollectedHeap::fill_with_objects(fill, fill_size);
    }
  }

  // Loop through the allocated regions, and create MemRegions summarizing
  // the allocated address range, combining contiguous ranges. Add the
  // MemRegions to the GrowableArray provided by the caller.
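  //
  // Note: alloc_new_region() always takes the highest free region, so
  // _allocated_regions holds regions in descending address order; walking
  // it from the last element back to the first therefore visits the
  // allocated ranges in ascending address order.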
  int index = _allocated_regions.length() - 1;
  assert(_allocated_regions.at(index) == _allocation_region,
         "expected region %u at end of array, found %u",
         _allocation_region->hrm_index(), _allocated_regions.at(index)->hrm_index());
  HeapWord* base_address = _allocation_region->bottom();
  HeapWord* top = base_address;

  while (index >= 0) {
    HeapRegion* next = _allocated_regions.at(index);
    HeapWord* new_base = next->bottom();
    HeapWord* new_top = next->top();
    if (new_base != top) {
      ranges->append(MemRegion(base_address, pointer_delta(top, base_address)));
      base_address = new_base;
    }
    top = new_top;
    index = index - 1;
  }

  assert(top != base_address, "zero-sized range, address " PTR_FORMAT, p2i(base_address));
  ranges->append(MemRegion(base_address, pointer_delta(top, base_address)));
  _allocated_regions.clear();
  _allocation_region = NULL;
}