
src/hotspot/share/gc/shared/threadLocalAllocBuffer.inline.hpp

rev 49852 : imported patch 8191471-g1-retained-mutator-region

*** 52,83 ****
    }
    return NULL;
  }
  
  inline size_t ThreadLocalAllocBuffer::compute_size(size_t obj_size) {
-   const size_t aligned_obj_size = align_object_size(obj_size);
- 
    // Compute the size for the new TLAB.
    // The "last" tlab may be smaller to reduce fragmentation.
    // unsafe_max_tlab_alloc is just a hint.
    const size_t available_size = Universe::heap()->unsafe_max_tlab_alloc(myThread()) / HeapWordSize;
!   size_t new_tlab_size = MIN3(available_size, desired_size() + aligned_obj_size, max_size());
  
    // Make sure there's enough room for object and filler int[].
!   const size_t obj_plus_filler_size = aligned_obj_size + alignment_reserve();
!   if (new_tlab_size < obj_plus_filler_size) {
      // If there isn't enough room for the allocation, return failure.
      log_trace(gc, tlab)("ThreadLocalAllocBuffer::compute_size(" SIZE_FORMAT ") returns failure", obj_size);
      return 0;
    }
    log_trace(gc, tlab)("ThreadLocalAllocBuffer::compute_size(" SIZE_FORMAT ") returns " SIZE_FORMAT, obj_size, new_tlab_size);
    return new_tlab_size;
  }
  
  void ThreadLocalAllocBuffer::record_slow_allocation(size_t obj_size) {
    // Raise size required to bypass TLAB next time. Why? Else there's
    // a risk that a thread that repeatedly allocates objects of one
    // size will get stuck on this slow path.
--- 52,84 ----
    }
    return NULL;
  }
  
  inline size_t ThreadLocalAllocBuffer::compute_size(size_t obj_size) {
    // Compute the size for the new TLAB.
    // The "last" tlab may be smaller to reduce fragmentation.
    // unsafe_max_tlab_alloc is just a hint.
    const size_t available_size = Universe::heap()->unsafe_max_tlab_alloc(myThread()) / HeapWordSize;
!   size_t new_tlab_size = MIN3(available_size, desired_size() + align_object_size(obj_size), max_size());
  
    // Make sure there's enough room for object and filler int[].
!   if (new_tlab_size < compute_min_size(obj_size)) {
      // If there isn't enough room for the allocation, return failure.
      log_trace(gc, tlab)("ThreadLocalAllocBuffer::compute_size(" SIZE_FORMAT ") returns failure", obj_size);
      return 0;
    }
    log_trace(gc, tlab)("ThreadLocalAllocBuffer::compute_size(" SIZE_FORMAT ") returns " SIZE_FORMAT, obj_size, new_tlab_size);
    return new_tlab_size;
  }
  
+ inline size_t ThreadLocalAllocBuffer::compute_min_size(size_t obj_size) {
+   const size_t aligned_obj_size = align_object_size(obj_size);
+   return aligned_obj_size + alignment_reserve();
+ }
  
  void ThreadLocalAllocBuffer::record_slow_allocation(size_t obj_size) {
    // Raise size required to bypass TLAB next time. Why? Else there's
    // a risk that a thread that repeatedly allocates objects of one
    // size will get stuck on this slow path.
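
For context, a minimal standalone sketch of the refactoring shown above: the minimum-TLAB-size bound is split out of compute_size() into a separate compute_min_size() helper so that other callers can reuse the same check. The constants and helper names below (kObjectAlignmentWords, kAlignmentReserveWords, align_object_size_words, alignment_reserve_words, the tlab_sketch namespace) are illustrative stand-ins, not the HotSpot definitions; all sizes are assumed to be in HeapWord units.

// Standalone sketch of the compute_size()/compute_min_size() split; the
// constants below are assumed values, not HotSpot's.
#include <algorithm>
#include <cstddef>
#include <cstdio>

namespace tlab_sketch {

constexpr size_t kObjectAlignmentWords  = 2; // assumed object alignment, in words
constexpr size_t kAlignmentReserveWords = 4; // assumed space reserved for the filler int[]

inline size_t align_object_size_words(size_t size_words) {
  // Round up to the assumed object alignment (a power of two).
  return (size_words + kObjectAlignmentWords - 1) & ~(kObjectAlignmentWords - 1);
}

inline size_t alignment_reserve_words() {
  return kAlignmentReserveWords;
}

// Smallest TLAB that can hold the object plus the filler reserve
// (the bound the patch factors out as compute_min_size()).
inline size_t compute_min_size(size_t obj_size_words) {
  return align_object_size_words(obj_size_words) + alignment_reserve_words();
}

// Mirrors the structure of the new compute_size(): clamp the desired size,
// then fail (return 0) if even the minimum bound does not fit.
inline size_t compute_size(size_t obj_size_words,
                           size_t available_words,
                           size_t desired_words,
                           size_t max_words) {
  const size_t new_tlab_size = std::min({available_words,
                                         desired_words + align_object_size_words(obj_size_words),
                                         max_words});
  if (new_tlab_size < compute_min_size(obj_size_words)) {
    return 0; // not enough room for object and filler int[]
  }
  return new_tlab_size;
}

} // namespace tlab_sketch

int main() {
  std::printf("min size for a 10-word object: %zu words\n", tlab_sketch::compute_min_size(10));
  std::printf("tlab size (plenty of space):   %zu words\n", tlab_sketch::compute_size(10, 4096, 256, 1024));
  std::printf("tlab size (space too tight):   %zu words\n", tlab_sketch::compute_size(10, 8, 256, 1024));
  return 0;
}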