src/cpu/x86/vm/sharedRuntime_x86_64.cpp

rev 5968 : 8031320: Use Intel RTM instructions for locks
Summary: Use RTM for inflated locks and stack locks.
Reviewed-by: iveresov, twisti, roland, dcubed
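For context: with UseRTMLocking, generated lock code first attempts the critical section as an RTM (Restricted Transactional Memory) transaction and falls back to the real stack lock or inflated monitor when the transaction aborts. Below is a minimal user-level sketch of that elision pattern, written against Intel's RTM intrinsics from <immintrin.h> rather than HotSpot's MacroAssembler; the fallback lock and all names here are illustrative, not HotSpot code.

// Build with: g++ -std=c++11 -mrtm elide.cpp (needs RTM-capable hardware).
#include <immintrin.h>
#include <atomic>

static std::atomic<bool> lock_held(false);   // illustrative fallback lock word

static void fallback_acquire() {
  while (lock_held.exchange(true, std::memory_order_acquire)) { /* spin */ }
}
static void fallback_release() {
  lock_held.store(false, std::memory_order_release);
}

template <typename Body>
void with_elided_lock(Body body) {
  unsigned status = _xbegin();
  if (status == _XBEGIN_STARTED) {
    // Read the lock word inside the transaction: if another thread holds
    // or takes the real lock, the resulting conflict aborts us.
    if (lock_held.load(std::memory_order_relaxed))
      _xabort(0xff);                         // lock busy, give up explicitly
    body();                                  // critical section, no lock taken
    _xend();                                 // commit all stores atomically
    return;
  }
  // Transaction aborted: take the real lock, as HotSpot falls back to the
  // stack lock or inflated monitor.
  fallback_acquire();
  body();
  fallback_release();
}

Usage would look like with_elided_lock([]{ shared_counter++; }); the essential point is that the lock word is read inside the transaction, so a concurrent real-lock holder forces an abort rather than a race.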

*** 2008,2017 ****
--- 2008,2024 ----
    __ subptr(rsp, stack_size - 2*wordSize);

    // Frame is now completed as far as size and linkage.
    int frame_complete = ((intptr_t)__ pc()) - start;

+   if (UseRTMLocking) {
+     // Abort RTM transaction before calling JNI
+     // because critical section will be large and will be
+     // aborted anyway. Also nmethod could be deoptimized.
+     __ xabort(0);
+   }
+ 
  #ifdef ASSERT
      {
        Label L;
        __ mov(rax, rsp);
        __ andptr(rax, -16); // must be 16 byte boundary (see amd64 ABI)
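A note on why emitting xabort unconditionally is safe: the XABORT instruction is architecturally a no-op when executed outside a transactional region, so the native wrapper does not need to test for transactional state first. A hedged sketch of the same pattern with the user-level intrinsics, where long_external_call is a hypothetical stand-in for the JNI target:

#include <immintrin.h>

extern void long_external_call();  // hypothetical stand-in for the native method

void call_out() {
  // No-op outside a transaction; inside one, aborts eagerly rather than
  // letting a long out-call overflow transactional buffers and abort later.
  _xabort(0);
  long_external_call();
}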
*** 3608,3617 ****
--- 3615,3629 ----
    assert(SimpleRuntimeFrame::framesize % 4 == 0, "sp not 16-byte aligned");

    address start = __ pc();

+   if (UseRTMLocking) {
+     // Abort RTM transaction before possible nmethod deoptimization.
+     __ xabort(0);
+   }
+ 
    // Push self-frame. We get here with a return address on the
    // stack, so rsp is 8-byte aligned until we allocate our frame.
    __ subptr(rsp, SimpleRuntimeFrame::return_off << LogBytesPerInt); // Epilog!

    // No callee saved registers. rbp is assumed implicitly saved
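The explicit abort code is visible on the fallback path: when a transaction ends via XABORT, _xbegin() returns a status word with _XABORT_EXPLICIT set and the 8-bit immediate in its top byte. A small self-contained sketch (intrinsics again, not MacroAssembler; build with g++ -mrtm on RTM-capable hardware):

#include <immintrin.h>
#include <cstdio>

int main() {
  unsigned status = _xbegin();
  if (status == _XBEGIN_STARTED) {
    _xabort(0);   // mirrors the xabort(0) emitted above
    _xend();      // never reached: _xabort rolls execution back to _xbegin
  } else if (status & _XABORT_EXPLICIT) {
    std::printf("explicit abort, code 0x%x\n", _XABORT_CODE(status));
  } else {
    std::printf("aborted for another reason (conflict, capacity, ...)\n");
  }
  return 0;
}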
*** 3788,3797 ****
--- 3800,3816 ----
    address call_pc = NULL;
    int frame_size_in_words;
    bool cause_return = (poll_type == POLL_AT_RETURN);
    bool save_vectors = (poll_type == POLL_AT_VECTOR_LOOP);

+   if (UseRTMLocking) {
+     // Abort RTM transaction before calling runtime
+     // because critical section will be large and will be
+     // aborted anyway. Also nmethod could be deoptimized.
+     __ xabort(0);
+   }
+ 
    // Make room for return address (or push it again)
    if (!cause_return) {
      __ push(rbx);
    }
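Finally, not every abort should end the transactional attempt: the status word's _XABORT_RETRY bit marks transient causes where retrying may succeed. HotSpot's retry policy is controlled by flags introduced with this change (for example RTMRetryCount); the sketch below uses an illustrative constant, not the real default:

#include <immintrin.h>

const int kMaxRetries = 5;  // illustrative; not HotSpot's RTMRetryCount default

// Returns true if body ran and committed transactionally. On false the
// caller is expected to take the real lock instead.
bool try_transactional(void (*body)()) {
  for (int i = 0; i < kMaxRetries; ++i) {
    unsigned status = _xbegin();
    if (status == _XBEGIN_STARTED) {
      body();
      _xend();
      return true;
    }
    if (!(status & _XABORT_RETRY))
      break;  // persistent abort cause: retrying will not help
  }
  return false;
}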