src/cpu/x86/vm/templateInterpreterGenerator_x86.cpp
*** old/src/cpu/x86/vm/templateInterpreter_x86_64.cpp Wed Dec 2 18:36:55 2015
--- new/src/cpu/x86/vm/templateInterpreterGenerator_x86.cpp Wed Dec 2 18:36:54 2015
*** 49,58 ****
--- 49,62 ----
#define __ _masm->
#ifndef CC_INTERP
+ // Global Register Names
+ static const Register rbcp = LP64_ONLY(r13) NOT_LP64(rsi);
+ static const Register rlocals = LP64_ONLY(r14) NOT_LP64(rdi);
+
const int method_offset = frame::interpreter_frame_method_offset * wordSize;
const int bcp_offset = frame::interpreter_frame_bcp_offset * wordSize;
const int locals_offset = frame::interpreter_frame_locals_offset * wordSize;
//-----------------------------------------------------------------------------
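The new rbcp and rlocals names pick the canonical bytecode-pointer and locals registers once, per build width, instead of hard-coding r13/r14 (64-bit) or rsi/rdi (32-bit) at every use site. A minimal sketch of how the selection macros behave (their real definitions live in utilities/macros.hpp):

  #ifdef _LP64
    #define LP64_ONLY(code) code
    #define NOT_LP64(code)
  #else
    #define LP64_ONLY(code)
    #define NOT_LP64(code) code
  #endif
  // So 'LP64_ONLY(r13) NOT_LP64(rsi)' collapses to r13 on 64-bit
  // builds and to rsi on 32-bit builds.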
*** 93,158 ****
--- 97,168 ----
// expression stack must be empty before entering the VM if an
// exception happened
__ empty_expression_stack();
// setup parameters
// ??? convention: expect aberrant index in register ebx
! __ lea(c_rarg1, ExternalAddress((address)name));
! Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
+ __ lea(rarg, ExternalAddress((address)name));
__ call_VM(noreg,
CAST_FROM_FN_PTR(address,
InterpreterRuntime::
throw_ArrayIndexOutOfBoundsException),
! c_rarg1, rbx);
return entry;
}
address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
address entry = __ pc();
// object is at TOS
! __ pop(c_rarg1);
! Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
+ __ pop(rarg);
// expression stack must be empty before entering the VM if an
// exception happened
__ empty_expression_stack();
__ call_VM(noreg,
CAST_FROM_FN_PTR(address,
InterpreterRuntime::
throw_ClassCastException),
! c_rarg1);
return entry;
}
address TemplateInterpreterGenerator::generate_exception_handler_common(
const char* name, const char* message, bool pass_oop) {
assert(!pass_oop || message == NULL, "either oop or message but not both");
address entry = __ pc();
+
+ Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
+ Register rarg2 = NOT_LP64(rbx) LP64_ONLY(c_rarg2);
+
if (pass_oop) {
// object is at TOS
- __ pop(c_rarg2);
}
// expression stack must be empty before entering the VM if an
// exception happened
__ empty_expression_stack();
// setup parameters
! __ lea(c_rarg1, ExternalAddress((address)name));
if (pass_oop) {
__ call_VM(rax, CAST_FROM_FN_PTR(address,
InterpreterRuntime::
create_klass_exception),
! c_rarg1, c_rarg2);
! rarg, rarg2);
} else {
// kind of lame ExternalAddress can't take NULL because
// external_word_Relocation will assert.
if (message != NULL) {
- __ lea(c_rarg2, ExternalAddress((address)message));
} else {
- __ movptr(c_rarg2, NULL_WORD);
}
__ call_VM(rax,
CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception),
! c_rarg1, c_rarg2);
! rarg, rarg2);
}
// throw exception
__ jump(ExternalAddress(Interpreter::throw_exception_entry()));
return entry;
}
*** 168,177 ****
--- 178,211 ----
address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
address entry = __ pc();
+ #ifndef _LP64
+ #ifdef COMPILER2
+ // The FPU stack is clean if UseSSE >= 2 but must be cleaned in other cases
+ if ((state == ftos && UseSSE < 1) || (state == dtos && UseSSE < 2)) {
+ for (int i = 1; i < 8; i++) {
+ __ ffree(i);
+ }
+ } else if (UseSSE < 2) {
+ __ empty_FPU_stack();
+ }
+ #endif // COMPILER2
+ if ((state == ftos && UseSSE < 1) || (state == dtos && UseSSE < 2)) {
+ __ MacroAssembler::verify_FPU(1, "generate_return_entry_for compiled");
+ } else {
+ __ MacroAssembler::verify_FPU(0, "generate_return_entry_for compiled");
+ }
+
+ if (state == ftos) {
+ __ MacroAssembler::verify_FPU(UseSSE >= 1 ? 0 : 1, "generate_return_entry_for in interpreter");
+ } else if (state == dtos) {
+ __ MacroAssembler::verify_FPU(UseSSE >= 2 ? 0 : 1, "generate_return_entry_for in interpreter");
+ }
+ #endif // _LP64
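These UseSSE guards encode the 32-bit floating-point return convention: a result may legally sit on the x87 stack only when SSE cannot carry it. A condensed sketch of the condition tested above (an illustrative helper, not a function in the source):

  // float results live in ST(0) only below UseSSE >= 1, doubles only
  // below UseSSE >= 2; in every other case the x87 stack must be
  // empty at a return entry.
  static bool x87_may_hold_result(TosState state, int use_sse) {
    return (state == ftos && use_sse < 1) || (state == dtos && use_sse < 2);
  }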
+
// Restore stack bottom in case i2c adjusted stack
__ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
// and NULL it as marker that esp is now tos until next java call
__ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
*** 198,230 ****
--- 232,275 ----
}
address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step) {
address entry = __ pc();
+
+ #ifndef _LP64
+ if (state == ftos) {
+ __ MacroAssembler::verify_FPU(UseSSE >= 1 ? 0 : 1, "generate_deopt_entry_for in interpreter");
+ } else if (state == dtos) {
+ __ MacroAssembler::verify_FPU(UseSSE >= 2 ? 0 : 1, "generate_deopt_entry_for in interpreter");
+ }
+ #endif // _LP64
+
// NULL last_sp until next java call
__ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
__ restore_bcp();
__ restore_locals();
+ const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
+ NOT_LP64(__ get_thread(thread);)
#if INCLUDE_JVMCI
// Check if we need to take lock at entry of synchronized method.
if (UseJVMCICompiler) {
Label L;
- __ cmpb(Address(r15_thread, JavaThread::pending_monitorenter_offset()), 0);
__ jcc(Assembler::zero, L);
// Clear flag.
- __ movb(Address(r15_thread, JavaThread::pending_monitorenter_offset()), 0);
// Satisfy calling convention for lock_method().
__ get_method(rbx);
// Take lock.
lock_method();
__ bind(L);
}
#endif
// handle exceptions
{
Label L;
- __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t) NULL_WORD);
__ jcc(Assembler::zero, L);
__ call_VM(noreg,
CAST_FROM_FN_PTR(address,
InterpreterRuntime::throw_pending_exception));
__ should_not_reach_here();
*** 232,276 ****
--- 277,332 ----
}
__ dispatch_next(state, step);
return entry;
}
int AbstractInterpreter::BasicType_as_index(BasicType type) {
int i = 0;
switch (type) {
case T_BOOLEAN: i = 0; break;
case T_CHAR : i = 1; break;
case T_BYTE : i = 2; break;
case T_SHORT : i = 3; break;
case T_INT : i = 4; break;
case T_LONG : i = 5; break;
case T_VOID : i = 6; break;
case T_FLOAT : i = 7; break;
case T_DOUBLE : i = 8; break;
case T_OBJECT : i = 9; break;
case T_ARRAY : i = 9; break;
default : ShouldNotReachHere();
}
assert(0 <= i && i < AbstractInterpreter::number_of_result_handlers,
"index out of bounds");
return i;
}
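T_OBJECT and T_ARRAY deliberately share index 9: both come back as oops and are served by the same result handler. For example, using the accessor seen elsewhere in this file:

  // Both oop-like types resolve to the same generated handler.
  address h_obj = AbstractInterpreter::result_handler(T_OBJECT);
  address h_arr = AbstractInterpreter::result_handler(T_ARRAY);
  assert(h_obj == h_arr, "T_OBJECT and T_ARRAY share a result handler");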
address TemplateInterpreterGenerator::generate_result_handler_for(
BasicType type) {
address entry = __ pc();
switch (type) {
case T_BOOLEAN: __ c2bool(rax); break;
+ #ifndef _LP64
+ case T_CHAR : __ andptr(rax, 0xFFFF); break;
+ #else
case T_CHAR : __ movzwl(rax, rax); break;
+ #endif // _LP64
case T_BYTE : __ sign_extend_byte(rax); break;
case T_SHORT : __ sign_extend_short(rax); break;
case T_INT : /* nothing to do */ break;
case T_LONG : /* nothing to do */ break;
case T_VOID : /* nothing to do */ break;
+ #ifndef _LP64
+ case T_DOUBLE :
+ case T_FLOAT :
+ { const Register t = InterpreterRuntime::SignatureHandlerGenerator::temp();
+ __ pop(t); // remove return address first
+ // Must return a result for interpreter or compiler. In SSE
+ // mode, results are returned in xmm0 and the FPU stack must
+ // be empty.
+ if (type == T_FLOAT && UseSSE >= 1) {
+ // Load ST0
+ __ fld_d(Address(rsp, 0));
+ // Store as float and empty fpu stack
+ __ fstp_s(Address(rsp, 0));
+ // and reload
+ __ movflt(xmm0, Address(rsp, 0));
+ } else if (type == T_DOUBLE && UseSSE >= 2 ) {
+ __ movdbl(xmm0, Address(rsp, 0));
+ } else {
+ // restore ST0
+ __ fld_d(Address(rsp, 0));
+ }
+ // and pop the temp
+ __ addptr(rsp, 2 * wordSize);
+ __ push(t); // restore return address
+ }
+ break;
+ #else
case T_FLOAT : /* nothing to do */ break;
case T_DOUBLE : /* nothing to do */ break;
+ #endif // _LP64
+
case T_OBJECT :
// retrieve result from frame
__ movptr(rax, Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize));
// and verify it
__ verify_oop(rax);
*** 301,311 ****
--- 357,367 ----
//
// Note: checking for negative value instead of overflow
// so we have a 'sticky' overflow test
//
// rbx: method
! // ecx: invocation counter
! // rcx: invocation counter
//
void InterpreterGenerator::generate_counter_incr(
Label* overflow,
Label* profile_method,
Label* profile_method_continue) {
*** 381,394 ****
--- 437,450 ----
}
void InterpreterGenerator::generate_counter_overflow(Label* do_continue) {
// Asm interpreter on entry
! // r14/rdi - locals
! // r13/rsi - bcp
// rbx - method
! // edx - cpool --- DOES NOT APPEAR TO BE TRUE
! // rdx - cpool --- DOES NOT APPEAR TO BE TRUE
// rbp - interpreter frame
// On return (i.e. jump to entry_point) [ back to invocation of interpreter ]
// Everything as it was on entry
// rdx is not restored. Doesn't appear to really be set.
*** 398,412 ****
--- 454,469 ----
// indicates if the counter overflow occurs at a backwards branch
// (NULL bcp). We pass zero for it. The call returns the address
// of the verified entry point for the method or NULL if the
// compilation did not complete (either went background or bailed
// out).
! __ movl(c_rarg1, 0);
! Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
+ __ movl(rarg, 0);
__ call_VM(noreg,
CAST_FROM_FN_PTR(address,
InterpreterRuntime::frequency_counter_overflow),
! c_rarg1);
__ movptr(rbx, Address(rbp, method_offset)); // restore Method*
// Preserve invariant that r13/r14 contain bcp/locals of sender frame
// and jump to the interpreted entry.
__ jmp(*do_continue, relocInfo::none);
*** 448,459 ****
--- 505,523 ----
__ jcc(Assembler::belowEqual, after_frame_check);
// compute rsp as if this were going to be the last frame on
// the stack before the red zone
! const Address stack_base(r15_thread, Thread::stack_base_offset());
! const Address stack_size(r15_thread, Thread::stack_size_offset());
! Label after_frame_check_pop;
! const Register thread = NOT_LP64(rsi) LP64_ONLY(r15_thread);
+ #ifndef _LP64
+ __ push(rsi);
+ __ get_thread(thread);
+ #endif
+
+ const Address stack_base(thread, Thread::stack_base_offset());
+ const Address stack_size(thread, Thread::stack_size_offset());
// locals + overhead, in bytes
__ mov(rax, rdx);
__ shlptr(rax, Interpreter::logStackElementSize); // 2 slots per parameter.
__ addptr(rax, overhead_size);
*** 483,516 ****
--- 547,585 ----
// add in the red and yellow zone sizes
__ addptr(rax, max_pages * page_size);
// check against the current stack bottom
__ cmpptr(rsp, rax);
! __ jcc(Assembler::above, after_frame_check);
+
+ __ jcc(Assembler::above, after_frame_check_pop);
+ NOT_LP64(__ pop(rsi);) // get saved bcp
// Restore sender's sp as SP. This is necessary if the sender's
// frame is an extended compiled frame (see gen_c2i_adapter())
// and safer anyway in case of JSR292 adaptations.
__ pop(rax); // return address must be moved if SP is changed
! __ mov(rsp, r13);
! __ mov(rsp, rbcp);
__ push(rax);
// Note: the restored frame is not necessarily interpreted.
// Use the shared runtime version of the StackOverflowError.
assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "stub not yet generated");
__ jump(ExternalAddress(StubRoutines::throw_StackOverflowError_entry()));
+ // all done with frame size check
+ __ bind(after_frame_check_pop);
+ NOT_LP64(__ pop(rsi);)
// all done with frame size check
__ bind(after_frame_check);
}
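The limit computed in rax is the thread's stack bottom plus the guard zones plus the frame being built, so the frame fits only while rsp stays above it. The same arithmetic in plain C++ (a hypothetical helper, for illustration only):

  static bool frame_fits(uintptr_t rsp, uintptr_t stack_base, size_t stack_size,
                         size_t guard_zone_bytes, size_t frame_bytes) {
    // Stacks grow downward, so the usable bottom is base - size.
    uintptr_t limit = (stack_base - stack_size) + guard_zone_bytes + frame_bytes;
    return rsp > limit;   // mirrors cmpptr(rsp, rax); jcc(above, ...)
  }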
// Allocate monitor and lock method (asm interpreter)
//
// Args:
// rbx: Method*
! // r14/rdi: locals
//
// Kills:
// rax
// c_rarg0, c_rarg1, c_rarg2, c_rarg3, ...(param regs)
// rscratch1, rscratch2 (scratch regs)
*** 538,548 ****
--- 607,617 ----
const int mirror_offset = in_bytes(Klass::java_mirror_offset());
Label done;
__ movl(rax, access_flags);
__ testl(rax, JVM_ACC_STATIC);
// get receiver (assume this is frequent case)
! __ movptr(rax, Address(r14, Interpreter::local_offset_in_bytes(0)));
! __ movptr(rax, Address(rlocals, Interpreter::local_offset_in_bytes(0)));
__ jcc(Assembler::zero, done);
__ movptr(rax, Address(rbx, Method::const_offset()));
__ movptr(rax, Address(rax, ConstMethod::constants_offset()));
__ movptr(rax, Address(rax,
ConstantPool::pool_holder_offset_in_bytes()));
*** 564,594 ****
--- 633,664 ----
// add space for monitor & lock
__ subptr(rsp, entry_size); // add space for a monitor entry
__ movptr(monitor_block_top, rsp); // set new monitor block top
// store object
__ movptr(Address(rsp, BasicObjectLock::obj_offset_in_bytes()), rax);
! __ movptr(c_rarg1, rsp); // object address
! __ lock_object(c_rarg1);
+ const Register lockreg = NOT_LP64(rdx) LP64_ONLY(c_rarg1);
+ __ movptr(lockreg, rsp); // object address
+ __ lock_object(lockreg);
}
// Generate a fixed interpreter frame. This is identical setup for
// interpreted methods and for native methods hence the shared code.
//
// Args:
// rax: return address
// rbx: Method*
! // r14/rdi: pointer to locals
! // r13: sender sp
! // r13/rsi: sender sp
// rdx: cp cache
void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
// initialize fixed part of activation frame
__ push(rax); // save return address
__ enter(); // save old & set new rbp
! __ push(r13); // set sender sp
! __ push(rbcp); // set sender sp
__ push((int)NULL_WORD); // leave last_sp as null
! __ movptr(r13, Address(rbx, Method::const_offset())); // get ConstMethod*
! __ lea(r13, Address(r13, ConstMethod::codes_offset())); // get codebase
! __ movptr(rbcp, Address(rbx, Method::const_offset())); // get ConstMethod*
! __ lea(rbcp, Address(rbcp, ConstMethod::codes_offset())); // get codebase
__ push(rbx); // save Method*
if (ProfileInterpreter) {
Label method_data_continue;
__ movptr(rdx, Address(rbx, in_bytes(Method::method_data_offset())));
__ testptr(rdx, rdx);
*** 602,616 ****
--- 672,686 ----
__ movptr(rdx, Address(rbx, Method::const_offset()));
__ movptr(rdx, Address(rdx, ConstMethod::constants_offset()));
__ movptr(rdx, Address(rdx, ConstantPool::cache_offset_in_bytes()));
__ push(rdx); // set constant pool cache
! __ push(r14); // set locals pointer
! __ push(rlocals); // set locals pointer
if (native_call) {
__ push(0); // no bcp
} else {
! __ push(r13); // set bcp
! __ push(rbcp); // set bcp
}
__ push(0); // reserve word for pointer to expression stack bottom
__ movptr(Address(rsp, 0), rsp); // set expression stack bottom
}
*** 665,693 ****
--- 735,772 ----
// rax: local 0
// rbx: method (but can be used as scratch now)
// rdx: scratch
// rdi: scratch
+ // Preserve the sender sp in case the pre-barrier
+ // calls the runtime
+ NOT_LP64(__ push(rsi);)
+
// Generate the G1 pre-barrier code to log the value of
// the referent field in an SATB buffer.
// Load the value of the referent field.
const Address field_address(rax, referent_offset);
__ load_heap_oop(rax, field_address);
+ const Register sender_sp = NOT_LP64(rsi) LP64_ONLY(r13);
+ const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
+ NOT_LP64(__ get_thread(thread);)
+
// Generate the G1 pre-barrier code to log the value of
// the referent field in an SATB buffer.
__ g1_write_barrier_pre(noreg /* obj */,
rax /* pre_val */,
- r15_thread /* thread */,
rbx /* tmp */,
true /* tosca_live */,
true /* expand_call */);
// _areturn
+ NOT_LP64(__ pop(rsi);) // get sender sp
__ pop(rdi); // get return address
! __ mov(rsp, r13); // set sp to sender sp
! __ mov(rsp, sender_sp); // set sp to sender sp
__ jmp(rdi);
__ ret(0);
// generate a vanilla interpreter entry as the slow path
__ bind(slow_path);
*** 699,880 ****
--- 778,796 ----
// If G1 is not enabled then attempt to go through the accessor entry point
// Reference.get is an accessor
return NULL;
}
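The pre-barrier emitted above maintains G1's snapshot-at-the-beginning invariant: before Reference.get can hand out the referent, its value must be logged so concurrent marking does not lose it. Conceptually (the names below are illustrative, not the runtime's):

  static void satb_pre_barrier(Thread* thread, oop pre_val) {
    if (satb_marking_active() && pre_val != NULL) {
      satb_enqueue(thread, pre_val);  // log the old referent for the marker
    }
  }

The expand_call flag tells the generator that the enqueue path may call into the runtime, which is why the 32-bit path preserves rsi (the sender sp) around the barrier.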
/**
* Method entry for static native methods:
* int java.util.zip.CRC32.update(int crc, int b)
*/
address InterpreterGenerator::generate_CRC32_update_entry() {
if (UseCRC32Intrinsics) {
address entry = __ pc();
// rbx: Method*
// r13: senderSP must be preserved for slow path, set SP to it on fast path
// c_rarg0: scratch (rdi on non-Win64, rcx on Win64)
// c_rarg1: scratch (rsi on non-Win64, rdx on Win64)
Label slow_path;
// If we need a safepoint check, generate full interpreter entry.
ExternalAddress state(SafepointSynchronize::address_of_state());
__ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
SafepointSynchronize::_not_synchronized);
__ jcc(Assembler::notEqual, slow_path);
// We don't generate local frame and don't align stack because
// we call stub code and there is no safepoint on this path.
// Load parameters
const Register crc = rax; // crc
const Register val = c_rarg0; // source java byte value
const Register tbl = c_rarg1; // scratch
// Arguments are reversed on java expression stack
__ movl(val, Address(rsp, wordSize)); // byte value
__ movl(crc, Address(rsp, 2*wordSize)); // Initial CRC
__ lea(tbl, ExternalAddress(StubRoutines::crc_table_addr()));
__ notl(crc); // ~crc
__ update_byte_crc32(crc, val, tbl);
__ notl(crc); // ~crc
// result in rax
// _areturn
__ pop(rdi); // get return address
__ mov(rsp, r13); // set sp to sender sp
__ jmp(rdi);
// generate a vanilla native entry as the slow path
__ bind(slow_path);
__ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native));
return entry;
}
return NULL;
}
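update_byte_crc32 performs one step of the table-driven, bit-reflected CRC-32 (polynomial 0xEDB88320), and the surrounding notl instructions apply the standard pre- and post-inversion. In C++ terms (a sketch; table stands in for the table at StubRoutines::crc_table_addr()):

  static uint32_t crc32_update_byte(uint32_t crc, uint8_t b, const uint32_t* table) {
    return table[(crc ^ b) & 0xFF] ^ (crc >> 8);
  }
  // The fast path above computes: result = ~crc32_update_byte(~crc, val, table);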
/**
* Method entry for static native methods:
* int java.util.zip.CRC32.updateBytes(int crc, byte[] b, int off, int len)
* int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len)
*/
address InterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
if (UseCRC32Intrinsics) {
address entry = __ pc();
// rbx: Method*
// r13: senderSP must be preserved for slow path, set SP to it on fast path
Label slow_path;
// If we need a safepoint check, generate full interpreter entry.
ExternalAddress state(SafepointSynchronize::address_of_state());
__ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
SafepointSynchronize::_not_synchronized);
__ jcc(Assembler::notEqual, slow_path);
// We don't generate local frame and don't align stack because
// we call stub code and there is no safepoint on this path.
// Load parameters
const Register crc = c_rarg0; // crc
const Register buf = c_rarg1; // source java byte array address
const Register len = c_rarg2; // length
const Register off = len; // offset (never overlaps with 'len')
// Arguments are reversed on java expression stack
// Calculate address of start element
if (kind == Interpreter::java_util_zip_CRC32_updateByteBuffer) {
__ movptr(buf, Address(rsp, 3*wordSize)); // long buf
__ movl2ptr(off, Address(rsp, 2*wordSize)); // offset
__ addq(buf, off); // + offset
__ movl(crc, Address(rsp, 5*wordSize)); // Initial CRC
} else {
__ movptr(buf, Address(rsp, 3*wordSize)); // byte[] array
__ addptr(buf, arrayOopDesc::base_offset_in_bytes(T_BYTE)); // + header size
__ movl2ptr(off, Address(rsp, 2*wordSize)); // offset
__ addq(buf, off); // + offset
__ movl(crc, Address(rsp, 4*wordSize)); // Initial CRC
}
// Can now load 'len' since we're finished with 'off'
__ movl(len, Address(rsp, wordSize)); // Length
__ super_call_VM_leaf(CAST_FROM_FN_PTR(address, StubRoutines::updateBytesCRC32()), crc, buf, len);
// result in rax
// _areturn
__ pop(rdi); // get return address
__ mov(rsp, r13); // set sp to sender sp
__ jmp(rdi);
// generate a vanilla native entry as the slow path
__ bind(slow_path);
__ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native));
return entry;
}
return NULL;
}
/**
* Method entry for static native methods:
* int java.util.zip.CRC32C.updateBytes(int crc, byte[] b, int off, int end)
* int java.util.zip.CRC32C.updateByteBuffer(int crc, long address, int off, int end)
*/
address InterpreterGenerator::generate_CRC32C_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
if (UseCRC32CIntrinsics) {
address entry = __ pc();
// Load parameters
const Register crc = c_rarg0; // crc
const Register buf = c_rarg1; // source java byte array address
const Register len = c_rarg2;
const Register off = c_rarg3; // offset
const Register end = len;
// Arguments are reversed on java expression stack
// Calculate address of start element
if (kind == Interpreter::java_util_zip_CRC32C_updateDirectByteBuffer) {
__ movptr(buf, Address(rsp, 3 * wordSize)); // long buf
__ movl2ptr(off, Address(rsp, 2 * wordSize)); // offset
__ addq(buf, off); // + offset
__ movl(crc, Address(rsp, 5 * wordSize)); // Initial CRC
// Note on 5 * wordSize vs. 4 * wordSize:
// * int java.util.zip.CRC32C.updateByteBuffer(int crc, long address, int off, int end)
// 4 2,3 1 0
// end starts at SP + 8
// The Java(R) Virtual Machine Specification Java SE 7 Edition
// 4.10.2.3. Values of Types long and double
// "When calculating operand stack length, values of type long and double have length two."
} else {
__ movptr(buf, Address(rsp, 3 * wordSize)); // byte[] array
__ addptr(buf, arrayOopDesc::base_offset_in_bytes(T_BYTE)); // + header size
__ movl2ptr(off, Address(rsp, 2 * wordSize)); // offset
__ addq(buf, off); // + offset
__ movl(crc, Address(rsp, 4 * wordSize)); // Initial CRC
}
__ movl(end, Address(rsp, wordSize)); // end
__ subl(end, off); // end - off
__ super_call_VM_leaf(CAST_FROM_FN_PTR(address, StubRoutines::updateBytesCRC32C()), crc, buf, len);
// result in rax
// _areturn
__ pop(rdi); // get return address
__ mov(rsp, r13); // set sp to sender sp
__ jmp(rdi);
return entry;
}
return NULL;
}
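The offsets used above follow from the reversed argument order on the expression stack plus the JVMS rule that a long occupies two operand slots. For updateByteBuffer the incoming layout is therefore (a sketch derived from the loads above; the return address sits at offset 0):

  // [rsp + 0*wordSize]  return address
  // [rsp + 1*wordSize]  end      (operand slot 0, top of expression stack)
  // [rsp + 2*wordSize]  off      (operand slot 1)
  // [rsp + 3*wordSize]  address  (operand slots 2-3: a long takes two slots)
  // [rsp + 5*wordSize]  crc      (operand slot 4)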
// Interpreter stub for calling a native method. (asm interpreter)
// This sets up a somewhat different looking stack for calling the
// native method than the typical interpreter frame setup.
address InterpreterGenerator::generate_native_entry(bool synchronized) {
// determine code generation flags
bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods;
// rbx: Method*
! // r13: sender sp
! // rbcp: sender sp
address entry_point = __ pc();
const Address constMethod (rbx, Method::const_offset());
const Address access_flags (rbx, Method::access_flags_offset());
*** 890,906 ****
--- 806,822 ----
// expression stack and the arguments are already on the stack and
// we only add a handful of words to the stack
// rbx: Method*
// rcx: size of parameters
! // r13: sender sp
! // rbcp: sender sp
__ pop(rax); // get return address
// for natives the size of locals is zero
! // compute beginning of parameters (rdi/r14)
! __ lea(r14, Address(rsp, rcx, Address::times_8, -wordSize));
! __ lea(rlocals, Address(rsp, rcx, Interpreter::stackElementScale(), -wordSize));
// add 2 zero-initialized slots for native calls
// initialize result_handler slot
__ push((int) NULL_WORD);
// slot for oop temp
*** 933,943 ****
--- 849,861 ----
// would try to exit the monitor of synchronized methods which hasn't
// been entered yet, we set the thread local variable
// _do_not_unlock_if_synchronized to true. The remove_activation will
// check this flag.
! const Address do_not_unlock_if_synchronized(r15_thread,
! const Register thread1 = NOT_LP64(rax) LP64_ONLY(r15_thread);
+ NOT_LP64(__ get_thread(thread1);)
+ const Address do_not_unlock_if_synchronized(thread1,
in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
__ movbool(do_not_unlock_if_synchronized, true);
// increment invocation count & check for overflow
Label invocation_counter_overflow;
*** 949,958 ****
--- 867,877 ----
__ bind(continue_after_compile);
bang_stack_shadow_pages(true);
// reset the _do_not_unlock_if_synchronized flag
+ NOT_LP64(__ get_thread(thread1);)
__ movbool(do_not_unlock_if_synchronized, false);
// check for synchronized methods
// Must happen AFTER invocation_counter check and stack overflow check,
// so method is not locked if overflows.
*** 989,1009 ****
--- 908,937 ----
// jvmti support
__ notify_method_entry();
// work registers
const Register method = rbx;
! const Register t = r11;
! const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread);
+ const Register t = NOT_LP64(rcx) LP64_ONLY(r11);
// allocate space for parameters
__ get_method(method);
__ movptr(t, Address(method, Method::const_offset()));
__ load_unsigned_short(t, Address(t, ConstMethod::size_of_parameters_offset()));
+
+ #ifndef _LP64
+ __ shlptr(t, Interpreter::logStackElementSize);
+ __ addptr(t, 2*wordSize); // allocate two more slots for JNIEnv and possible mirror
+ __ subptr(rsp, t);
+ __ andptr(rsp, -(StackAlignmentInBytes)); // gcc needs 16 byte aligned stacks to do XMM intrinsics
+ #else
__ shll(t, Interpreter::logStackElementSize);
__ subptr(rsp, t);
__ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
__ andptr(rsp, -16); // must be 16 byte boundary (see amd64 ABI)
+ #endif // _LP64
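Both branches end by clearing the low bits of rsp: andptr(rsp, -16) works because -16 is ~0xF in two's complement, so the AND rounds the stack pointer down to a 16-byte boundary, which on a downward-growing stack can only enlarge the just-allocated argument area. Equivalent C++ (illustrative):

  static uintptr_t align_down_16(uintptr_t sp) {
    return sp & ~uintptr_t(0xF);   // same effect as andptr(rsp, -16)
  }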
// get signature handler
{
Label L;
__ movptr(t, Address(method, Method::signature_handler_offset()));
*** 1017,1031 ****
--- 945,959 ----
__ movptr(t, Address(method, Method::signature_handler_offset()));
__ bind(L);
}
// call signature handler
! assert(InterpreterRuntime::SignatureHandlerGenerator::from() == r14,
! assert(InterpreterRuntime::SignatureHandlerGenerator::from() == rlocals,
"adjust this code");
assert(InterpreterRuntime::SignatureHandlerGenerator::to() == rsp,
"adjust this code");
! assert(InterpreterRuntime::SignatureHandlerGenerator::temp() == NOT_LP64(t) LP64_ONLY(rscratch1),
"adjust this code");
// The generated handlers do not touch RBX (the method oop).
// However, large signatures cannot be cached and are generated
// each time here. The slow-path generator can do a GC on return,
*** 1054,1075 ****
--- 982,1007 ----
__ movptr(t, Address(t, mirror_offset));
// copy mirror into activation frame
__ movptr(Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize),
t);
// pass handle to mirror
+ #ifndef _LP64
+ __ lea(t, Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize));
+ __ movptr(Address(rsp, wordSize), t);
+ #else
__ lea(c_rarg1,
Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize));
+ #endif // _LP64
__ bind(L);
}
// get native function entry point
{
Label L;
__ movptr(rax, Address(method, Method::native_function_offset()));
ExternalAddress unsatisfied(SharedRuntime::native_method_throw_unsatisfied_link_error_entry());
! __ movptr(rscratch2, unsatisfied.addr());
! __ cmpptr(rax, rscratch2);
! __ cmpptr(rax, unsatisfied.addr());
__ jcc(Assembler::notEqual, L);
__ call_VM(noreg,
CAST_FROM_FN_PTR(address,
InterpreterRuntime::prepare_native_call),
method);
*** 1077,1126 ****
--- 1009,1099 ----
__ movptr(rax, Address(method, Method::native_function_offset()));
__ bind(L);
}
// pass JNIEnv
+ #ifndef _LP64
+ __ get_thread(thread);
+ __ lea(t, Address(thread, JavaThread::jni_environment_offset()));
+ __ movptr(Address(rsp, 0), t);
+
+ // set_last_Java_frame_before_call
+ // It is enough that the pc()
+ // points into the right code segment. It does not have to be the correct return pc.
+ __ set_last_Java_frame(thread, noreg, rbp, __ pc());
+ #else
__ lea(c_rarg0, Address(r15_thread, JavaThread::jni_environment_offset()));
// It is enough that the pc() points into the right code
// segment. It does not have to be the correct return pc.
__ set_last_Java_frame(rsp, rbp, (address) __ pc());
+ #endif // _LP64
// change thread state
#ifdef ASSERT
{
Label L;
- __ movl(t, Address(r15_thread, JavaThread::thread_state_offset()));
__ cmpl(t, _thread_in_Java);
__ jcc(Assembler::equal, L);
__ stop("Wrong thread state in native stub");
__ bind(L);
}
#endif
// Change state to native
- __ movl(Address(r15_thread, JavaThread::thread_state_offset()),
_thread_in_native);
// Call the native method.
__ call(rax);
// result potentially in rax or xmm0
+ // 32: result potentially in rdx:rax or ST0
+ // 64: result potentially in rax or xmm0
// Verify or restore cpu control state after JNI call
__ restore_cpu_control_state_after_jni();
// NOTE: The order of these pushes is known to frame::interpreter_frame_result
// in order to extract the result of a method call. If the order of these
// pushes change or anything else is added to the stack then the code in
// interpreter_frame_result must also change.
+ #ifndef _LP64
+ // save potential result in ST(0) & rdx:rax
+ // (if the result handler is the T_FLOAT or T_DOUBLE handler, the result must be in ST0 -
+ // the check is necessary to avoid potential Intel FPU overflow problems caused by saving/restoring 'empty' FPU registers)
+ // It is safe to do this push because the state is _thread_in_native and the return address will be found
+ // via _last_native_pc and not via _last_Java_sp
+
+ // NOTE: the order of these pushes is known to frame::interpreter_frame_result.
+ // If the order changes or anything else is added to the stack the code in
+ // interpreter_frame_result will have to be changed.
+
+ { Label L;
+ Label push_double;
+ ExternalAddress float_handler(AbstractInterpreter::result_handler(T_FLOAT));
+ ExternalAddress double_handler(AbstractInterpreter::result_handler(T_DOUBLE));
+ __ cmpptr(Address(rbp, (frame::interpreter_frame_oop_temp_offset + 1)*wordSize),
+ float_handler.addr());
+ __ jcc(Assembler::equal, push_double);
+ __ cmpptr(Address(rbp, (frame::interpreter_frame_oop_temp_offset + 1)*wordSize),
+ double_handler.addr());
+ __ jcc(Assembler::notEqual, L);
+ __ bind(push_double);
+ __ push_d(); // FP values are returned using the FPU, so push FPU contents (even if UseSSE > 0).
+ __ bind(L);
+ }
+ #else
__ push(dtos);
+ #endif // _LP64
+
__ push(ltos);
// change thread state
- __ movl(Address(r15_thread, JavaThread::thread_state_offset()),
+ NOT_LP64(__ get_thread(thread);)
+ __ movl(Address(thread, JavaThread::thread_state_offset()),
_thread_in_native_trans);
if (os::is_MP()) {
if (UseMembar) {
// Force this write out before the read below
*** 1130,1180 ****
--- 1103,1168 ----
} else {
// Write serialization page so VM thread can do a pseudo remote membar.
// We use the current thread pointer to calculate a thread specific
// offset to write to within the page. This minimizes bus traffic
// due to cache line collision.
! __ serialize_memory(r15_thread, rscratch2);
! __ serialize_memory(thread, rcx);
}
}
+ #ifndef _LP64
+ if (AlwaysRestoreFPU) {
+ // Make sure the control word is correct.
+ __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
+ }
+ #endif // _LP64
+
// check for safepoint operation in progress and/or pending suspend requests
{
Label Continue;
__ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
SafepointSynchronize::_not_synchronized);
Label L;
__ jcc(Assembler::notEqual, L);
- __ cmpl(Address(r15_thread, JavaThread::suspend_flags_offset()), 0);
__ jcc(Assembler::equal, Continue);
__ bind(L);
// Don't use call_VM as it will see a possible pending exception
// and forward it and never return here preventing us from
// clearing _last_native_pc down below. Also can't use
// call_VM_leaf either as it will check to see if r13 & r14 are
// preserved and correspond to the bcp/locals pointers. So we do a
// runtime call by hand.
//
+ #ifndef _LP64
+ __ push(thread);
+ __ call(RuntimeAddress(CAST_FROM_FN_PTR(address,
+ JavaThread::check_special_condition_for_native_trans)));
+ __ increment(rsp, wordSize);
+ __ get_thread(thread);
+ #else
__ mov(c_rarg0, r15_thread);
__ mov(r12, rsp); // remember sp (can only use r12 if not using call_VM)
__ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
__ andptr(rsp, -16); // align stack as required by ABI
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
__ mov(rsp, r12); // restore sp
__ reinit_heapbase();
+ #endif // _LP64
__ bind(Continue);
}
// change thread state
- __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_Java);
// reset_last_Java_frame
! __ reset_last_Java_frame(thread, true, true);
// reset handle block
- __ movptr(t, Address(r15_thread, JavaThread::active_handles_offset()));
__ movl(Address(t, JNIHandleBlock::top_offset_in_bytes()), (int32_t)NULL_WORD);
// If result is an oop unbox and store it in frame where gc will see it
// and result handler will pick it up
*** 1188,1235 ****
--- 1176,1229 ----
__ testptr(rax, rax);
__ jcc(Assembler::zero, store_result);
__ movptr(rax, Address(rax, 0));
__ bind(store_result);
__ movptr(Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize), rax);
! // keep stack depth as expected by pushing oop which will eventually be discarded
__ push(ltos);
__ bind(no_oop);
}
{
Label no_reguard;
- __ cmpl(Address(r15_thread, JavaThread::stack_guard_state_offset()),
JavaThread::stack_guard_yellow_disabled);
__ jcc(Assembler::notEqual, no_reguard);
__ pusha(); // XXX only save smashed registers
+ #ifndef _LP64
+ __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
+ __ popa();
+ #else
__ mov(r12, rsp); // remember sp (can only use r12 if not using call_VM)
__ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
__ andptr(rsp, -16); // align stack as required by ABI
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
__ mov(rsp, r12); // restore sp
__ popa(); // XXX only restore smashed registers
__ reinit_heapbase();
+ #endif // _LP64
__ bind(no_reguard);
}
// The method register is junk from after the thread_in_native transition
// until here. Also can't call_VM until the bcp has been
// restored. Need bcp for throwing exception below so get it now.
__ get_method(method);
! // restore rsi/r13 to have legal interpreter frame, i.e., bci == 0 <=>
// r13 == code_base()
! __ movptr(r13, Address(method, Method::const_offset())); // get ConstMethod*
! __ lea(r13, Address(r13, ConstMethod::codes_offset())); // get codebase
! __ movptr(rbcp, Address(method, Method::const_offset())); // get ConstMethod*
! __ lea(rbcp, Address(rbcp, ConstMethod::codes_offset())); // get codebase
+
// handle exceptions (exception handling will handle unlocking!)
{
Label L;
- __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t) NULL_WORD);
__ jcc(Assembler::zero, L);
// Note: At some point we may want to unify this with the code
// used in call_VM_base(); i.e., we should use the
// StubRoutines::forward_exception code. For now this doesn't work
// here because the rsp is not correctly set at this point.
*** 1253,1279 ****
--- 1247,1275 ----
// BasicObjectLock will be first in list, since this is a
// synchronized method. However, need to check that the object
// has not been unlocked by an explicit monitorexit bytecode.
const Address monitor(rbp,
(intptr_t)(frame::interpreter_frame_initial_sp_offset *
! wordSize - (int)sizeof(BasicObjectLock)));
+
+ const Register regmon = NOT_LP64(rdx) LP64_ONLY(c_rarg1);
// monitor expected in c_rarg1 for the slow unlock path
! __ lea(c_rarg1, monitor); // address of first monitor
! __ lea(regmon, monitor); // address of first monitor
! __ movptr(t, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()));
! __ movptr(t, Address(regmon, BasicObjectLock::obj_offset_in_bytes()));
__ testptr(t, t);
__ jcc(Assembler::notZero, unlock);
// Entry already unlocked, need to throw exception
__ MacroAssembler::call_VM(noreg,
CAST_FROM_FN_PTR(address,
InterpreterRuntime::throw_illegal_monitor_state_exception));
__ should_not_reach_here();
__ bind(unlock);
! __ unlock_object(c_rarg1);
! __ unlock_object(regmon);
}
__ bind(L);
}
// jvmti support
*** 1285,1295 ****
--- 1281,1291 ----
// restore potential result in edx:eax, call result handler to
// restore potential result in ST0 & handle result
__ pop(ltos);
! __ pop(dtos);
+ LP64_ONLY( __ pop(dtos);)
__ movptr(t, Address(rbp,
(frame::interpreter_frame_result_handler_offset) * wordSize));
__ call(t);
*** 1317,1327 ****
--- 1313,1323 ----
address InterpreterGenerator::generate_normal_entry(bool synchronized) {
// determine code generation flags
bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods;
// ebx: Method*
! // r13: sender sp
! // rbcp: sender sp
address entry_point = __ pc();
const Address constMethod(rbx, Method::const_offset());
const Address access_flags(rbx, Method::access_flags_offset());
const Address size_of_parameters(rdx,
*** 1333,1343 ****
--- 1329,1339 ----
__ movptr(rdx, constMethod);
__ load_unsigned_short(rcx, size_of_parameters);
// rbx: Method*
// rcx: size of parameters
! // r13: sender_sp (could differ from sp+wordSize if we were called via c2i )
! // rbcp: sender_sp (could differ from sp+wordSize if we were called via c2i )
__ load_unsigned_short(rdx, size_of_locals); // get size of locals in words
__ subl(rdx, rcx); // rdx = no. of additional locals
// YYY
*** 1349,1359 ****
--- 1345,1355 ----
// get return address
__ pop(rax);
// compute beginning of parameters (r14)
! __ lea(r14, Address(rsp, rcx, Address::times_8, -wordSize));
! __ lea(rlocals, Address(rsp, rcx, Interpreter::stackElementScale(), -wordSize));
// rdx - # of additional locals
// allocate space for locals
// explicitly initialize locals
{
*** 1393,1403 ****
--- 1389,1401 ----
// handler would try to exit the monitor of synchronized methods
// which hasn't been entered yet, we set the thread local variable
// _do_not_unlock_if_synchronized to true. The remove_activation
// will check this flag.
! const Address do_not_unlock_if_synchronized(r15_thread,
! const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);
+ NOT_LP64(__ get_thread(thread);)
+ const Address do_not_unlock_if_synchronized(thread,
in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
__ movbool(do_not_unlock_if_synchronized, true);
__ profile_parameters_type(rax, rcx, rdx);
// increment invocation count & check for overflow
*** 1418,1427 ****
--- 1416,1426 ----
// check for synchronized interpreted methods
bang_stack_shadow_pages(false);
// reset the _do_not_unlock_if_synchronized flag
+ NOT_LP64(__ get_thread(thread);)
__ movbool(do_not_unlock_if_synchronized, false);
// check for synchronized methods
// Must happen AFTER invocation_counter check and stack overflow check,
// so method is not locked if overflows.
*** 1477,1522 ****
--- 1476,1485 ----
}
return entry_point;
}
// These should never be compiled since the interpreter will prefer
// the compiled version to the intrinsic version.
bool AbstractInterpreter::can_be_compiled(methodHandle m) {
switch (method_kind(m)) {
case Interpreter::java_lang_math_sin : // fall thru
case Interpreter::java_lang_math_cos : // fall thru
case Interpreter::java_lang_math_tan : // fall thru
case Interpreter::java_lang_math_abs : // fall thru
case Interpreter::java_lang_math_log : // fall thru
case Interpreter::java_lang_math_log10 : // fall thru
case Interpreter::java_lang_math_sqrt : // fall thru
case Interpreter::java_lang_math_pow : // fall thru
case Interpreter::java_lang_math_exp :
return false;
default:
return true;
}
}
// How much stack a method activation needs in words.
int AbstractInterpreter::size_top_interpreter_activation(Method* method) {
const int entry_size = frame::interpreter_frame_monitor_size();
// total overhead size: entry_size + (saved rbp thru expr stack
// bottom). be sure to change this if you add/subtract anything
// to/from the overhead area
const int overhead_size =
-(frame::interpreter_frame_initial_sp_offset) + entry_size;
const int stub_code = frame::entry_frame_after_call_words;
const int method_stack = (method->max_locals() + method->max_stack()) *
Interpreter::stackElementWords;
return (overhead_size + method_stack + stub_code);
}
//-----------------------------------------------------------------------------
// Exceptions
void TemplateInterpreterGenerator::generate_throw_exception() {
// Entry point in previous activation (i.e., if the caller was
*** 1525,1556 ****
--- 1488,1520 ----
// Restore sp to interpreter_frame_last_sp even though we are going
// to empty the expression stack for the exception processing.
__ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
// rax: exception
// rdx: return address/pc that threw exception
! __ restore_bcp(); // r13/rsi points to call/send
__ restore_locals();
! LP64_ONLY(__ reinit_heapbase();) // restore r12 as heapbase.
// Entry point for exceptions thrown within interpreter code
Interpreter::_throw_exception_entry = __ pc();
// expression stack is undefined here
// rax: exception
! // r13/rsi: exception bcp
__ verify_oop(rax);
! __ mov(c_rarg1, rax);
! Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
+ LP64_ONLY(__ mov(c_rarg1, rax);)
// expression stack must be empty before entering the VM in case of
// an exception
__ empty_expression_stack();
// find exception handler address and preserve exception oop
__ call_VM(rdx,
CAST_FROM_FN_PTR(address,
InterpreterRuntime::exception_handler_for_exception),
! c_rarg1);
// rax: exception handler entry point
// rdx: preserved exception oop
! // r13/rsi: bcp for exception handler
__ push_ptr(rdx); // push exception which is now the only value on the stack
__ jmp(rax); // jump to exception handler (may be _remove_activation_entry!)
// If the exception is not handled in the current frame the frame is
// removed and the exception is rethrown (i.e. exception
*** 1573,1585 ****
--- 1537,1551 ----
__ empty_expression_stack();
// Set the popframe_processing bit in pending_popframe_condition
// indicating that we are currently handling popframe, so that
// call_VMs that may happen later do not trigger new popframe
// handling cycles.
! __ movl(rdx, Address(r15_thread, JavaThread::popframe_condition_offset()));
! const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
+ NOT_LP64(__ get_thread(thread);)
+ __ movl(rdx, Address(thread, JavaThread::popframe_condition_offset()));
__ orl(rdx, JavaThread::popframe_processing_bit);
- __ movl(Address(r15_thread, JavaThread::popframe_condition_offset()), rdx);
{
// Check to see whether we are returning to a deoptimized frame.
// (The PopFrame call ensures that the caller of the popped frame is
// either interpreted or compiled and deoptimizes it if compiled.)
*** 1589,1628 ****
--- 1555,1597 ----
//
// Note that we don't compare the return PC against the
// deoptimization blob's unpack entry because of the presence of
// adapter frames in C2.
Label caller_not_deoptimized;
! __ movptr(c_rarg1, Address(rbp, frame::return_addr_offset * wordSize));
! Register rarg = NOT_LP64(rdx) LP64_ONLY(c_rarg1);
+ __ movptr(rarg, Address(rbp, frame::return_addr_offset * wordSize));
__ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
! InterpreterRuntime::interpreter_contains), c_rarg1);
__ testl(rax, rax);
__ jcc(Assembler::notZero, caller_not_deoptimized);
// Compute size of arguments for saving when returning to
// deoptimized caller
__ get_method(rax);
__ movptr(rax, Address(rax, Method::const_offset()));
__ load_unsigned_short(rax, Address(rax, in_bytes(ConstMethod::
size_of_parameters_offset())));
__ shll(rax, Interpreter::logStackElementSize);
- __ restore_locals(); // XXX do we need this?
! __ subptr(r14, rax);
! __ addptr(r14, wordSize);
! __ subptr(rlocals, rax);
! __ addptr(rlocals, wordSize);
// Save these arguments
+ NOT_LP64(__ get_thread(thread);)
__ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
Deoptimization::
popframe_preserve_args),
! r15_thread, rax, r14);
! thread, rax, rlocals);
__ remove_activation(vtos, rdx,
/* throw_monitor_exception */ false,
/* install_monitor_exception */ false,
/* notify_jvmdi */ false);
// Inform deoptimization that it is responsible for restoring
// these arguments
- __ movl(Address(r15_thread, JavaThread::popframe_condition_offset()),
+ NOT_LP64(__ get_thread(thread);)
+ __ movl(Address(thread, JavaThread::popframe_condition_offset()),
JavaThread::popframe_force_deopt_reexecution_bit);
// Continue in deoptimization handler
__ jmp(rdx);
*** 1643,1688 ****
--- 1612,1669 ----
// no space between the top of the expression stack (current
// last_sp) and the top of stack. Rather than force deopt to
// maintain this kind of invariant all the time we call a small
// fixup routine to move the mutated arguments onto the top of our
// expression stack if necessary.
+ #ifndef _LP64
+ __ mov(rax, rsp);
+ __ movptr(rbx, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
+ __ get_thread(thread);
+ // PC must point into interpreter here
+ __ set_last_Java_frame(thread, noreg, rbp, __ pc());
+ __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), thread, rax, rbx);
+ __ get_thread(thread);
+ #else
__ mov(c_rarg1, rsp);
__ movptr(c_rarg2, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
// PC must point into interpreter here
__ set_last_Java_frame(noreg, rbp, __ pc());
__ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), r15_thread, c_rarg1, c_rarg2);
__ reset_last_Java_frame(true, true);
+ #endif
+ __ reset_last_Java_frame(thread, true, true);
+
// Restore the last_sp and null it out
__ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
__ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
- __ restore_bcp(); // XXX do we need this?
- __ restore_locals(); // XXX do we need this?
// The method data pointer was incremented already during
// call profiling. We have to restore the mdp for the current bcp.
if (ProfileInterpreter) {
__ set_method_data_pointer_for_bcp();
}
// Clear the popframe condition flag
- __ movl(Address(r15_thread, JavaThread::popframe_condition_offset()),
+ NOT_LP64(__ get_thread(thread);)
+ __ movl(Address(thread, JavaThread::popframe_condition_offset()),
JavaThread::popframe_inactive);
#if INCLUDE_JVMTI
{
Label L_done;
! const Register local0 = r14;
! const Register local0 = rlocals;
! __ cmpb(Address(r13, 0), Bytecodes::_invokestatic);
! __ cmpb(Address(rbcp, 0), Bytecodes::_invokestatic);
__ jcc(Assembler::notEqual, L_done);
// The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
// Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL.
__ get_method(rdx);
__ movptr(rax, Address(local0, 0));
! __ call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), rax, rdx, r13);
! __ call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), rax, rdx, rbcp);
__ testptr(rax, rax);
__ jcc(Assembler::zero, L_done);
__ movptr(Address(rbx, 0), rax);
*** 1695,1709 ****
--- 1676,1692 ----
Interpreter::_remove_activation_entry = __ pc();
// preserve exception over this code sequence
__ pop_ptr(rax);
- __ movptr(Address(r15_thread, JavaThread::vm_result_offset()), rax);
+ NOT_LP64(__ get_thread(thread);)
+ __ movptr(Address(thread, JavaThread::vm_result_offset()), rax);
// remove the activation (without doing throws on illegalMonitorExceptions)
__ remove_activation(vtos, rdx, false, true, false);
// restore exception
- __ get_vm_result(rax, r15_thread);
+ NOT_LP64(__ get_thread(thread);)
+ __ get_vm_result(rax, thread);
// In between activations - previous activation type unknown yet
// compute continuation point - the continuation point expects the
// following registers set up:
//
*** 1713,1723 ****
--- 1696,1706 ----
// rbp: ebp of caller
__ push(rax); // save exception
__ push(rdx); // save return address
__ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
SharedRuntime::exception_handler_for_return_address),
- r15_thread, rdx);
__ mov(rbx, rax); // save exception handler
__ pop(rdx); // restore return address
__ pop(rax); // restore exception
// Note that an "issuing PC" is actually the next PC after the call
__ jmp(rbx); // jump to exception
*** 1732,1745 ****
--- 1715,1730 ----
address entry = __ pc();
__ restore_bcp();
__ restore_locals();
__ empty_expression_stack();
! __ load_earlyret_value(state); // 32 bits returns value in rdx, so don't reuse
! __ movptr(rdx, Address(r15_thread, JavaThread::jvmti_thread_state_offset()));
! Address cond_addr(rdx, JvmtiThreadState::earlyret_state_offset());
! const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
+ NOT_LP64(__ get_thread(thread);)
+ __ movptr(rcx, Address(thread, JavaThread::jvmti_thread_state_offset()));
+ Address cond_addr(rcx, JvmtiThreadState::earlyret_state_offset());
// Clear the earlyret state
__ movl(cond_addr, JvmtiThreadState::earlyret_inactive);
__ remove_activation(state, rsi,
*** 1766,1777 ****
--- 1751,1767 ----
address& dep,
address& vep) {
assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
Label L;
aep = __ pc(); __ push_ptr(); __ jmp(L);
+ #ifndef _LP64
+ fep = __ pc(); __ push(ftos); __ jmp(L);
+ dep = __ pc(); __ push(dtos); __ jmp(L);
+ #else
fep = __ pc(); __ push_f(xmm0); __ jmp(L);
dep = __ pc(); __ push_d(xmm0); __ jmp(L);
+ #endif // _LP64
lep = __ pc(); __ push_l(); __ jmp(L);
bep = cep = sep =
iep = __ pc(); __ push_i();
vep = __ pc();
__ bind(L);
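Each label is the entry point for one top-of-stack state; the int-like states fold into a single entry because their values all arrive in rax. In summary (a comment-level sketch of the table built above):

  // aep: atos (oop)    fep: ftos (float)    dep: dtos (double)
  // lep: ltos (long)   bep/cep/sep/iep: byte/char/short/int (all in rax)
  // vep: vtos (void, nothing to push)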
*** 1792,1804 ****
--- 1782,1808 ----
//-----------------------------------------------------------------------------
// Non-product code
#ifndef PRODUCT
+
address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
address entry = __ pc();
+ #ifndef _LP64
+ // prepare expression stack
+ __ pop(rcx); // pop return address so expression stack is 'pure'
+ __ push(state); // save tosca
+
+ // pass tosca registers as arguments & call tracer
+ __ call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::trace_bytecode), rcx, rax, rdx);
+ __ mov(rcx, rax); // make sure return address is not destroyed by pop(state)
+ __ pop(state); // restore tosca
+
+ // return
+ __ jmp(rcx);
+ #else
__ push(state);
__ push(c_rarg0);
__ push(c_rarg1);
__ push(c_rarg2);
__ push(c_rarg3);
*** 1813,1822 ****
--- 1817,1827 ----
__ pop(c_rarg2);
__ pop(c_rarg1);
__ pop(c_rarg0);
__ pop(state);
__ ret(0); // return from result handler
+ #endif // _LP64
return entry;
}
void TemplateInterpreterGenerator::count_bytecode() {
*** 1844,1858 ****
--- 1849,1867 ----
// The runtime saves the right registers, depending on
// the tosca in-state for the given template.
assert(Interpreter::trace_code(t->tos_in()) != NULL,
"entry must have been generated");
+ #ifndef _LP64
+ __ call(RuntimeAddress(Interpreter::trace_code(t->tos_in())));
+ #else
__ mov(r12, rsp); // remember sp (can only use r12 if not using call_VM)
__ andptr(rsp, -16); // align stack as required by ABI
__ call(RuntimeAddress(Interpreter::trace_code(t->tos_in())));
__ mov(rsp, r12); // restore sp
__ reinit_heapbase();
+ #endif // _LP64
}
void TemplateInterpreterGenerator::stop_interpreter_at() {
Label L;