< prev index next >

src/hotspot/cpu/x86/c1_Runtime1_x86.cpp

Print this page
rev 51052 : 8207252: C1 still does eden allocations when TLAB is enabled
Summary: Only do eden allocations when TLAB is disabled
Reviewed-by: kbarrett, jrose, tschatzl
Contributed-by: jcbeyler@google.com

*** 1011,1021 ****
        } else {
          assert(id == fast_new_instance_init_check_id, "bad StubID");
          __ set_info("fast new_instance init check", dont_gc_arguments);
        }
  
!       if ((id == fast_new_instance_id || id == fast_new_instance_init_check_id) && UseTLAB
!           && Universe::heap()->supports_inline_contig_alloc()) {
          Label slow_path;
          Register obj_size = rcx;
          Register t1 = rbx;
          Register t2 = rsi;
--- 1011,1024 ----
        } else {
          assert(id == fast_new_instance_init_check_id, "bad StubID");
          __ set_info("fast new_instance init check", dont_gc_arguments);
        }
  
!       // If TLAB is disabled, see if there is support for inlining contiguous
!       // allocations.
!       // Otherwise, just go to the slow path.
!       if ((id == fast_new_instance_id || id == fast_new_instance_init_check_id) &&
!           !UseTLAB && Universe::heap()->supports_inline_contig_alloc()) {
          Label slow_path;
          Register obj_size = rcx;
          Register t1 = rbx;
          Register t2 = rsi;
*** 1044,1060 ****
            __ should_not_reach_here();
            __ bind(ok);
          }
  #endif // ASSERT
  
-         // if we got here then the TLAB allocation failed, so try
-         // refilling the TLAB or allocating directly from eden.
-         Label retry_tlab, try_eden;
          const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread);
          NOT_LP64(__ get_thread(thread));
  
-         __ bind(try_eden);
          // get the instance size (size is postive so movl is fine for 64bit)
          __ movl(obj_size, Address(klass, Klass::layout_helper_offset()));
  
          __ eden_allocate(thread, obj, obj_size, 0, t1, slow_path);
--- 1047,1059 ----
*** 1131,1143 ****
            __ should_not_reach_here();
            __ bind(ok);
          }
  #endif // ASSERT
  
!       // If we got here, the TLAB allocation failed, so try allocating from
!       // eden if inline contiguous allocations are supported.
!       if (UseTLAB && Universe::heap()->supports_inline_contig_alloc()) {
          Register arr_size = rsi;
          Register t1 = rcx;  // must be rcx for use as shift count
          Register t2 = rdi;
          Label slow_path;
  
--- 1130,1143 ----
            __ should_not_reach_here();
            __ bind(ok);
          }
  #endif // ASSERT
  
!       // If TLAB is disabled, see if there is support for inlining contiguous
!       // allocations.
!       // Otherwise, just go to the slow path.
!       if (!UseTLAB && Universe::heap()->supports_inline_contig_alloc()) {
          Register arr_size = rsi;
          Register t1 = rcx;  // must be rcx for use as shift count
          Register t2 = rdi;
          Label slow_path;
  
< prev index next >