# HG changeset patch
# User jcbeyler
# Date 1513640300 28800
#      Mon Dec 18 15:38:20 2017 -0800
# Node ID b1b970c1514beb9fc79f91acc2e985e670cad25b
# Parent  d85284ccd1bd865ebb1391d921be7a8ebfc5f2c9
8191027: JDK-8190862 work for arch x86/x64
Summary: Fixed Interpreter never refills TLAB
Reviewed-by: tschatzl, mdoerr, rehn

diff --git a/src/hotspot/cpu/x86/templateTable_x86.cpp b/src/hotspot/cpu/x86/templateTable_x86.cpp
--- a/src/hotspot/cpu/x86/templateTable_x86.cpp
+++ b/src/hotspot/cpu/x86/templateTable_x86.cpp
@@ -3844,7 +3844,6 @@
   Label done;
   Label initialize_header;
   Label initialize_object; // including clearing the fields
-  Label allocate_shared;
 
   __ get_cpool_and_tags(rcx, rax);
@@ -3870,12 +3869,19 @@
   __ testl(rdx, Klass::_lh_instance_slow_path_bit);
   __ jcc(Assembler::notZero, slow_case);
 
+  // Allocate the instance:
+  //  If TLAB is enabled:
+  //    Try to allocate in the TLAB.
+  //    If fails, go to the slow path.
+  //  Else If inline contiguous allocations are enabled:
+  //    Try to allocate in eden.
+  //    If fails due to heap end, go to slow path.
   //
-  // Allocate the instance
-  // 1) Try to allocate in the TLAB
-  // 2) if fail and the object is large allocate in the shared Eden
-  // 3) if the above fails (or is not applicable), go to a slow case
-  //    (creates a new TLAB, etc.)
+  //  If TLAB is enabled OR inline contiguous is enabled:
+  //    Initialize the allocation.
+  //    Exit.
+  //
+  //  Go to slow path.
 
   const bool allow_shared_alloc =
     Universe::heap()->supports_inline_contig_alloc();
@@ -3891,7 +3897,7 @@
     __ movptr(rax, Address(thread, in_bytes(JavaThread::tlab_top_offset())));
     __ lea(rbx, Address(rax, rdx, Address::times_1));
     __ cmpptr(rbx, Address(thread, in_bytes(JavaThread::tlab_end_offset())));
-    __ jcc(Assembler::above, allow_shared_alloc ? allocate_shared : slow_case);
+    __ jcc(Assembler::above, slow_case);
     __ movptr(Address(thread, in_bytes(JavaThread::tlab_top_offset())), rbx);
     if (ZeroTLAB) {
       // the fields have been already cleared
@@ -3900,40 +3906,40 @@
       // initialize both the header and fields
      __ jmp(initialize_object);
     }
+  } else {
+    // Allocation in the shared Eden, if allowed.
+    //
+    // rdx: instance size in bytes
+    if (allow_shared_alloc) {
+      ExternalAddress heap_top((address)Universe::heap()->top_addr());
+      ExternalAddress heap_end((address)Universe::heap()->end_addr());
+
+      Label retry;
+      __ bind(retry);
+      __ movptr(rax, heap_top);
+      __ lea(rbx, Address(rax, rdx, Address::times_1));
+      __ cmpptr(rbx, heap_end);
+      __ jcc(Assembler::above, slow_case);
+
+      // Compare rax, with the top addr, and if still equal, store the new
+      // top addr in rbx, at the address of the top addr pointer. Sets ZF if was
+      // equal, and clears it otherwise. Use lock prefix for atomicity on MPs.
+      //
+      // rax,: object begin
+      // rbx,: object end
+      // rdx: instance size in bytes
+      __ locked_cmpxchgptr(rbx, heap_top);
+
+      // if someone beat us on the allocation, try again, otherwise continue
+      __ jcc(Assembler::notEqual, retry);
+
+      __ incr_allocated_bytes(thread, rdx, 0);
+    }
   }
 
-  // Allocation in the shared Eden, if allowed.
-  //
-  // rdx: instance size in bytes
-  if (allow_shared_alloc) {
-    __ bind(allocate_shared);
-
-    ExternalAddress heap_top((address)Universe::heap()->top_addr());
-    ExternalAddress heap_end((address)Universe::heap()->end_addr());
-
-    Label retry;
-    __ bind(retry);
-    __ movptr(rax, heap_top);
-    __ lea(rbx, Address(rax, rdx, Address::times_1));
-    __ cmpptr(rbx, heap_end);
-    __ jcc(Assembler::above, slow_case);
-
-    // Compare rax, with the top addr, and if still equal, store the new
-    // top addr in rbx, at the address of the top addr pointer. Sets ZF if was
-    // equal, and clears it otherwise. Use lock prefix for atomicity on MPs.
-    //
-    // rax,: object begin
-    // rbx,: object end
-    // rdx: instance size in bytes
-    __ locked_cmpxchgptr(rbx, heap_top);
-
-    // if someone beat us on the allocation, try again, otherwise continue
-    __ jcc(Assembler::notEqual, retry);
-
-    __ incr_allocated_bytes(thread, rdx, 0);
-  }
-
-  if (UseTLAB || Universe::heap()->supports_inline_contig_alloc()) {
+  // If UseTLAB or allow_shared_alloc are true, the object is created above and
+  // there is an initialize need. Otherwise, skip and go to the slow path.
+  if (UseTLAB || allow_shared_alloc) {
    // The object is initialized before the header. If the object size is
    // zero, go directly to the header initialization.
    __ bind(initialize_object);
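
Note: the control flow described by the new comment block can be summarized in
plain C++. The sketch below is illustrative only -- Thread, Heap, and allocate
are hypothetical stand-ins, not HotSpot APIs; std::atomic::compare_exchange_weak
stands in for the locked_cmpxchgptr on the eden top word, and returning nullptr
stands in for jumping to slow_case.

    // Illustrative sketch only: hypothetical types, not HotSpot code.
    #include <atomic>
    #include <cstddef>

    struct Thread {            // stands in for JavaThread's TLAB fields
      char* tlab_top;
      char* tlab_end;
    };

    struct Heap {              // stands in for the shared eden top/end words
      std::atomic<char*> top;
      char* end;
    };

    // Returns the object start, or nullptr to mean "go to slow_case"
    // (which refills the TLAB or calls into the runtime).
    char* allocate(Thread* t, Heap* h, std::size_t size,
                   bool use_tlab, bool allow_shared_alloc) {
      if (use_tlab) {
        char* obj = t->tlab_top;
        // The fix: on TLAB overflow, go straight to the slow path so the
        // TLAB gets refilled, instead of falling through to shared eden.
        if (obj + size > t->tlab_end) return nullptr;
        t->tlab_top = obj + size;
        return obj;
      } else if (allow_shared_alloc) {
        // CAS-bump retry loop, mirroring movptr/lea/cmpptr/locked_cmpxchgptr.
        char* obj = h->top.load();
        for (;;) {
          char* new_top = obj + size;
          if (new_top > h->end) return nullptr;  // past heap end: slow path
          // On failure, compare_exchange_weak reloads obj and we retry,
          // like the jcc(notEqual, retry) above.
          if (h->top.compare_exchange_weak(obj, new_top)) return obj;
        }
      }
      return nullptr;  // neither fast path applies: slow path
    }

The behavioral change sits in the first branch: when the TLAB bump-pointer
overflows, the fast path now gives up immediately so the slow path can refill
the TLAB, rather than falling through to a shared-eden allocation, which is
what previously left interpreter TLABs unrefilled.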