src/cpu/x86/vm/c1_LIRAssembler_x86.cpp
*** old/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp Thu Nov 3 14:16:01 2016
--- new/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp Thu Nov 3 14:16:01 2016
*** 391,401 ****
--- 391,401 ----
    // failures when searching for the corresponding bci => add a nop
    // (was bug 5/14/1999 - gri)
    __ nop();
    // generate code for exception handler
!   address handler_base = __ start_a_stub(exception_handler_size());
    if (handler_base == NULL) {
      // not enough space left for the handler
      bailout("exception handler overflow");
      return -1;
    }
*** 410,420 ****
--- 410,420 ----
    __ verify_not_null_oop(rax);
    // search an exception handler (rax: exception oop, rdx: throwing pc)
    __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::handle_exception_from_callee_id)));
    __ should_not_reach_here();
!   guarantee(code_offset() - offset <= exception_handler_size(), "overflow");
    __ end_a_stub();
    return offset;
  }
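
Both hunks of this function carry the same change: exception_handler_size, formerly a compile-time constant, is now the function call exception_handler_size(). The sketch below restates the reserve/emit/check pattern that all three stub emitters in this file share; it is a simplified paraphrase under assumed names (emit_handler_body() is hypothetical), not HotSpot code.

    // Minimal sketch of the shared stub-emission pattern; not HotSpot code.
    // emit_handler_body() is a hypothetical stand-in for the instructions above.
    int emit_some_handler(int reserved_size) {
      address base = __ start_a_stub(reserved_size);  // reserve stub space
      if (base == NULL) {
        bailout("handler overflow");   // code buffer exhausted; abandon this compile
        return -1;
      }
      int offset = code_offset();
      emit_handler_body();             // the handler instructions shown in the diff
      // the reserved size must be an upper bound on what was actually emitted
      guarantee(code_offset() - offset <= reserved_size, "overflow");
      __ end_a_stub();
      return offset;                   // handler's offset within the method's code
    }
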
*** 488,498 ****
--- 488,498 ----
    // failures when searching for the corresponding bci => add a nop
    // (was bug 5/14/1999 - gri)
    __ nop();
    // generate code for exception handler
!   address handler_base = __ start_a_stub(deopt_handler_size());
    if (handler_base == NULL) {
      // not enough space left for the handler
      bailout("deopt handler overflow");
      return -1;
    }
*** 500,510 ****
--- 500,510 ----
    int offset = code_offset();
    InternalAddress here(__ pc());
    __ pushptr(here.addr());
    __ jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
!   guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
    __ end_a_stub();
    return offset;
  }
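
The deopt handler gets the same constant-to-function change for deopt_handler_size. Its body pushes its own address (so the deoptimization blob sees the handler's pc on the stack as a return address) and jumps to the blob's unpack entry. Making the size a member function rather than an enum constant lets it depend on runtime configuration, which matters for the call stub below once UseAOT is in play. A hedged sketch of such an accessor follows; the real declaration and byte counts live in the platform header and may differ.

    // Illustrative only: not the real HotSpot declaration or values.
    int LIR_Assembler::deopt_handler_size() const {
      // pushptr of the handler's own address plus a far jump to the
      // deopt blob; this sum is an assumed upper bound, not the real one.
      return NativeMovConstReg::instruction_size + NativeJump::instruction_size;
    }
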
*** 2803,2831 ****
--- 2803,2841 ----
  }
  void LIR_Assembler::emit_static_call_stub() {
    address call_pc = __ pc();
!   address stub = __ start_a_stub(call_stub_size());
    if (stub == NULL) {
      bailout("static call stub overflow");
      return;
    }
    int start = __ offset();
    if (os::is_MP()) {
      // make sure that the displacement word of the call ends up word aligned
      __ align(BytesPerWord, __ offset() + NativeMovConstReg::instruction_size + NativeCall::displacement_offset);
    }
!   __ relocate(static_stub_Relocation::spec(call_pc, false /* is_aot */));
    __ mov_metadata(rbx, (Metadata*)NULL);
    // must be set to -1 at code generation time
    assert(!os::is_MP() || ((__ offset() + 1) % BytesPerWord) == 0, "must be aligned on MP");
    // On 64bit this will die since it will take a movq & jmp, must be only a jmp
    __ jump(RuntimeAddress(__ pc()));
-   assert(__ offset() - start <= call_stub_size, "stub too big");
+   if (UseAOT) {
+     // Trampoline to aot code
+     __ relocate(static_stub_Relocation::spec(call_pc, true /* is_aot */));
+ #ifdef _LP64
+     __ mov64(rax, CONST64(0));  // address is zapped till fixup time.
+ #else
+     __ movl(rax, 0xdeadffff); // address is zapped till fixup time.
+ #endif
+     __ jmp(rax);
+   }
+   assert(__ offset() - start <= call_stub_size(), "stub too big");
    __ end_a_stub();
  }
  void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
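
This hunk carries the substantive change. With UseAOT enabled, the static call stub gains a second entry, relocated with is_aot = true: it loads a placeholder address into rax (CONST64(0) on 64-bit, 0xdeadffff on 32-bit, both zapped until fixup) and jumps through it. The AOT fixup code later patches that immediate with the real AOT entry point. Because the stub is larger when AOT is on, call_stub_size also becomes a function, and the size assert moves below the optional trampoline. A hedged sketch of how such an accessor could account for the trampoline; the structure is inferred from this diff and the component sizes are assumptions:

    // Illustrative only: not the real HotSpot declaration or values.
    int LIR_Assembler::call_stub_size() const {
      // base stub: aligned mov_metadata(rbx, ...) plus a jump
      int size = NativeMovConstReg::instruction_size + NativeJump::instruction_size + BytesPerWord;
      if (UseAOT) {
        // AOT trampoline: mov64 rax, imm64 (zapped until fixup) + jmp rax
        size += NativeMovConstReg::instruction_size + 2; // 2 = assumed size of jmp rax
      }
      return size;
    }
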