src/share/vm/gc/g1/g1Allocator.cpp
rev 8789 : [mq]: 8073052-Rename-and-clean-up-the-allocation-manager-hierarchy-in-g1Allocator
rev 8790 : imported patch 8003237-no-wait-for-free-list
rev 8791 : imported patch jon-fast-evac-failure
*** 77,86 ****
--- 77,88 ----
}
void G1DefaultAllocator::init_gc_alloc_regions(EvacuationInfo& evacuation_info) {
assert_at_safepoint(true /* should_be_vm_thread */);
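+ // Reset the base class state (the survivor/old "is full" flags) before
+ // (re)initializing the gc alloc regions for this collection.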
+ G1Allocator::init_gc_alloc_regions(evacuation_info);
+
_survivor_gc_alloc_region.init();
_old_gc_alloc_region.init();
reuse_retained_old_region(evacuation_info,
&_old_gc_alloc_region,
&_retained_old_gc_alloc_region);
*** 145,165 ****
ShouldNotReachHere();
return NULL; // Keep some compilers happy
}
}
HeapWord* G1Allocator::survivor_attempt_allocation(size_t word_size,
AllocationContext_t context) {
assert(!_g1h->is_humongous(word_size),
"we should not be seeing humongous-size allocations in this path");
HeapWord* result = survivor_gc_alloc_region(context)->attempt_allocation(word_size,
false /* bot_updates */);
! if (result == NULL) {
MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
result = survivor_gc_alloc_region(context)->attempt_allocation_locked(word_size,
false /* bot_updates */);
}
if (result != NULL) {
_g1h->dirty_young_block(result, word_size);
}
return result;
--- 147,186 ----
ShouldNotReachHere();
return NULL; // Keep some compilers happy
}
}
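+ // Whether getting a new gc alloc region for survivors resp. old objects has
+ // already failed during this collection; checked before retaking the
+ // FreeList_lock so that repeated allocation attempts can fail fast.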
+ bool G1Allocator::survivor_is_full(AllocationContext_t context) const {
+ return _survivor_is_full;
+ }
+
+ bool G1Allocator::old_is_full(AllocationContext_t context) const {
+ return _old_is_full;
+ }
+
+ void G1Allocator::set_survivor_full(AllocationContext_t context) {
+ _survivor_is_full = true;
+ }
+
+ void G1Allocator::set_old_full(AllocationContext_t context) {
+ _old_is_full = true;
+ }
+
HeapWord* G1Allocator::survivor_attempt_allocation(size_t word_size,
AllocationContext_t context) {
assert(!_g1h->is_humongous(word_size),
"we should not be seeing humongous-size allocations in this path");
HeapWord* result = survivor_gc_alloc_region(context)->attempt_allocation(word_size,
false /* bot_updates */);
! if (result == NULL && !survivor_is_full(context)) {
MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
result = survivor_gc_alloc_region(context)->attempt_allocation_locked(word_size,
false /* bot_updates */);
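+ // If even the locked attempt did not succeed, no new survivor region is
+ // available; remember that so further attempts fail fast.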
+ if (result == NULL) {
+ set_survivor_full(context);
+ }
}
if (result != NULL) {
_g1h->dirty_young_block(result, word_size);
}
return result;
*** 170,215 ****
assert(!_g1h->is_humongous(word_size),
"we should not be seeing humongous-size allocations in this path");
HeapWord* result = old_gc_alloc_region(context)->attempt_allocation(word_size,
true /* bot_updates */);
! if (result == NULL) {
MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
result = old_gc_alloc_region(context)->attempt_allocation_locked(word_size,
true /* bot_updates */);
}
return result;
}
G1PLABAllocator::G1PLABAllocator(G1Allocator* allocator) :
_g1h(G1CollectedHeap::heap()),
_allocator(allocator),
_survivor_alignment_bytes(calc_survivor_alignment_bytes()) {
}
! HeapWord* G1PLABAllocator::allocate_direct_or_new_plab(InCSetState dest,
size_t word_sz,
! AllocationContext_t context) {
size_t gclab_word_size = _g1h->desired_plab_sz(dest);
if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) {
G1PLAB* alloc_buf = alloc_buffer(dest, context);
alloc_buf->retire();
HeapWord* buf = _allocator->par_allocate_during_gc(dest, gclab_word_size, context);
! if (buf == NULL) {
! return NULL; // Let caller handle allocation failure.
! }
// Otherwise.
alloc_buf->set_word_size(gclab_word_size);
alloc_buf->set_buf(buf);
HeapWord* const obj = alloc_buf->allocate(word_sz);
assert(obj != NULL, "buffer was definitely big enough...");
return obj;
- } else {
- return _allocator->par_allocate_during_gc(dest, word_sz, context);
}
}
void G1PLABAllocator::undo_allocation(InCSetState dest, HeapWord* obj, size_t word_sz, AllocationContext_t context) {
alloc_buffer(dest, context)->undo_allocation(obj, word_sz);
}
--- 191,246 ----
assert(!_g1h->is_humongous(word_size),
"we should not be seeing humongous-size allocations in this path");
HeapWord* result = old_gc_alloc_region(context)->attempt_allocation(word_size,
true /* bot_updates */);
! if (result == NULL && !old_is_full(context)) {
MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
result = old_gc_alloc_region(context)->attempt_allocation_locked(word_size,
true /* bot_updates */);
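+ // As for survivors: a failed locked attempt means no new old region is
+ // available, so mark old as full for the rest of this collection.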
+ if (result == NULL) {
+ set_old_full(context);
+ }
}
return result;
}
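+ // Called when the gc alloc regions are (re)initialized at the start of a
+ // collection: permit survivor and old allocation attempts again.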
+ void G1Allocator::init_gc_alloc_regions(EvacuationInfo& evacuation_info) {
+ _survivor_is_full = false;
+ _old_is_full = false;
+ }
+
G1PLABAllocator::G1PLABAllocator(G1Allocator* allocator) :
_g1h(G1CollectedHeap::heap()),
_allocator(allocator),
_survivor_alignment_bytes(calc_survivor_alignment_bytes()) {
}
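+ // Allocate word_sz words for dest. If the request is small compared to the
+ // PLAB size, retire the current PLAB and try to get a new one, setting
+ // *plab_refill_failed if that fails. Requests that are too large for a PLAB,
+ // or that follow a failed refill, are allocated inline in the region instead.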
! HeapWord* G1PLABAllocator::allocate_inline_or_new_plab(InCSetState dest,
size_t word_sz,
! AllocationContext_t context,
! bool* plab_refill_failed) {
size_t gclab_word_size = _g1h->desired_plab_sz(dest);
if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) {
G1PLAB* alloc_buf = alloc_buffer(dest, context);
alloc_buf->retire();
HeapWord* buf = _allocator->par_allocate_during_gc(dest, gclab_word_size, context);
! if (buf != NULL) {
// Refill succeeded: allocate the object from the new PLAB.
alloc_buf->set_word_size(gclab_word_size);
alloc_buf->set_buf(buf);
HeapWord* const obj = alloc_buf->allocate(word_sz);
assert(obj != NULL, "buffer was definitely big enough...");
return obj;
}
+ // PLAB refill failed; record it for the caller before trying an inline allocation.
+ *plab_refill_failed = true;
+ }
+ // Try inline allocation.
+ return _allocator->par_allocate_during_gc(dest, word_sz, context);
}
void G1PLABAllocator::undo_allocation(InCSetState dest, HeapWord* obj, size_t word_sz, AllocationContext_t context) {
alloc_buffer(dest, context)->undo_allocation(obj, word_sz);
}