src/cpu/x86/vm/c1_LIRAssembler_x86.cpp

Old version:

 376 // This specifies the rsp decrement needed to build the frame
 377 int LIR_Assembler::initial_frame_size_in_bytes() const {
 378   // if rounding, must let FrameMap know!
 379 
 380   // The frame_map records size in slots (32bit word)
 381 
 382   // subtract two words to account for return address and link
 383   return (frame_map()->framesize() - (2*VMRegImpl::slots_per_word))  * VMRegImpl::stack_slot_size;
 384 }
 385 
 386 
 387 int LIR_Assembler::emit_exception_handler() {
 388   // if the last instruction is a call (typically to do a throw which
 389   // is coming at the end after block reordering) the return address
 390   // must still point into the code area in order to avoid assertion
 391   // failures when searching for the corresponding bci => add a nop
 392   // (was bug 5/14/1999 - gri)
 393   __ nop();
 394 
 395   // generate code for exception handler
 396   address handler_base = __ start_a_stub(exception_handler_size);
 397   if (handler_base == NULL) {
 398     // not enough space left for the handler
 399     bailout("exception handler overflow");
 400     return -1;
 401   }
 402 
 403   int offset = code_offset();
 404 
 405   // the exception oop and pc are in rax, and rdx
 406   // no other registers need to be preserved, so invalidate them
 407   __ invalidate_registers(false, true, true, false, true, true);
 408 
 409   // check that there is really an exception
 410   __ verify_not_null_oop(rax);
 411 
 412   // search an exception handler (rax: exception oop, rdx: throwing pc)
 413   __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::handle_exception_from_callee_id)));
 414   __ should_not_reach_here();
 415   guarantee(code_offset() - offset <= exception_handler_size, "overflow");
 416   __ end_a_stub();
 417 
 418   return offset;
 419 }
 420 
 421 
 422 // Emit the code to remove the frame from the stack in the exception
 423 // unwind path.
 424 int LIR_Assembler::emit_unwind_handler() {
 425 #ifndef PRODUCT
 426   if (CommentedAssembly) {
 427     _masm->block_comment("Unwind handler");
 428   }
 429 #endif
 430 
 431   int offset = code_offset();
 432 
 433   // Fetch the exception from TLS and clear out exception related thread state
 434   Register thread = NOT_LP64(rsi) LP64_ONLY(r15_thread);
 435   NOT_LP64(__ get_thread(rsi));


 473   __ jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));
 474 
 475   // Emit the slow path assembly
 476   if (stub != NULL) {
 477     stub->emit_code(this);
 478   }
 479 
 480   return offset;
 481 }
 482 
 483 
 484 int LIR_Assembler::emit_deopt_handler() {
 485   // if the last instruction is a call (typically to do a throw which
 486   // is coming at the end after block reordering) the return address
 487   // must still point into the code area in order to avoid assertion
 488   // failures when searching for the corresponding bci => add a nop
 489   // (was bug 5/14/1999 - gri)
 490   __ nop();
 491 
 492   // generate code for exception handler
 493   address handler_base = __ start_a_stub(deopt_handler_size);
 494   if (handler_base == NULL) {
 495     // not enough space left for the handler
 496     bailout("deopt handler overflow");
 497     return -1;
 498   }
 499 
 500   int offset = code_offset();
 501   InternalAddress here(__ pc());
 502 
 503   __ pushptr(here.addr());
 504   __ jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
 505   guarantee(code_offset() - offset <= deopt_handler_size, "overflow");
 506   __ end_a_stub();
 507 
 508   return offset;
 509 }
 510 
 511 
 512 void LIR_Assembler::return_op(LIR_Opr result) {
 513   assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == rax, "word returns are in rax,");
 514   if (!result->is_illegal() && result->is_float_kind() && !result->is_xmm_register()) {
 515     assert(result->fpu() == 0, "result must already be on TOS");
 516   }
 517 
 518   // Pop the stack before the safepoint code
 519   __ remove_frame(initial_frame_size_in_bytes());
 520 
 521   if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
 522     __ reserved_stack_check();
 523   }
 524 
 525   bool result_is_oop = result->is_valid() ? result->is_oop() : false;


2788 }
2789 
2790 
2791 void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
2792   __ ic_call(op->addr());
2793   add_call_info(code_offset(), op->info());
2794   assert(!os::is_MP() ||
2795          (__ offset() - NativeCall::instruction_size + NativeCall::displacement_offset) % BytesPerWord == 0,
2796          "must be aligned");
2797 }
2798 
2799 
2800 /* Currently, vtable-dispatch is only enabled for sparc platforms */
2801 void LIR_Assembler::vtable_call(LIR_OpJavaCall* op) {
2802   ShouldNotReachHere();
2803 }
2804 
2805 
2806 void LIR_Assembler::emit_static_call_stub() {
2807   address call_pc = __ pc();
2808   address stub = __ start_a_stub(call_stub_size);
2809   if (stub == NULL) {
2810     bailout("static call stub overflow");
2811     return;
2812   }
2813 
2814   int start = __ offset();
2815   if (os::is_MP()) {
2816     // make sure that the displacement word of the call ends up word aligned
2817     __ align(BytesPerWord, __ offset() + NativeMovConstReg::instruction_size + NativeCall::displacement_offset);
2818   }
2819   __ relocate(static_stub_Relocation::spec(call_pc));
2820   __ mov_metadata(rbx, (Metadata*)NULL);
2821   // must be set to -1 at code generation time
2822   assert(!os::is_MP() || ((__ offset() + 1) % BytesPerWord) == 0, "must be aligned on MP");
2823   // On 64bit this will die since it will take a movq & jmp, must be only a jmp
2824   __ jump(RuntimeAddress(__ pc()));
2825 
2826   assert(__ offset() - start <= call_stub_size, "stub too big");
2827   __ end_a_stub();
2828 }
2829 
2830 
2831 void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
2832   assert(exceptionOop->as_register() == rax, "must match");
2833   assert(exceptionPC->as_register() == rdx, "must match");
2834 
2835   // exception object is not added to oop map by LinearScan
2836   // (LinearScan assumes that no oops are in fixed registers)
2837   info->add_register_oop(exceptionOop);
2838   Runtime1::StubID unwind_id;
2839 
2840   // get current pc information
2841   // pc is only needed if the method has an exception handler, the unwind code does not need it.
2842   int pc_for_athrow_offset = __ offset();
2843   InternalAddress pc_for_athrow(__ pc());
2844   __ lea(exceptionPC->as_register(), pc_for_athrow);
2845   add_call_info(pc_for_athrow_offset, info); // for exception handler
2846 




New version:

 376 // This specifies the rsp decrement needed to build the frame
 377 int LIR_Assembler::initial_frame_size_in_bytes() const {
 378   // if rounding, must let FrameMap know!
 379 
 380   // The frame_map records size in slots (32bit word)
 381 
 382   // subtract two words to account for return address and link
 383   return (frame_map()->framesize() - (2*VMRegImpl::slots_per_word))  * VMRegImpl::stack_slot_size;
 384 }
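
For reference, a standalone sketch of the same arithmetic with the usual HotSpot constants spelled out (4-byte stack slots, hence 2 slots per word on LP64 and 1 on a 32-bit build); the framesize value below is invented:

#include <cstdio>

int main() {
  const int stack_slot_size = 4;                  // VMRegImpl::stack_slot_size: one 32-bit slot
  const int slots_per_word  = sizeof(void*) / 4;  // 2 on LP64, 1 on 32-bit
  const int framesize       = 12;                 // hypothetical frame_map()->framesize(), in slots
  // The return address and the saved rbp link are pushed by the call/frame setup itself,
  // so those two words are excluded from the rsp decrement computed here.
  int decrement = (framesize - 2 * slots_per_word) * stack_slot_size;
  printf("rsp decrement = %d bytes\n", decrement); // 32 on LP64, 40 on a 32-bit build
  return 0;
}
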
 385 
 386 
 387 int LIR_Assembler::emit_exception_handler() {
 388   // if the last instruction is a call (typically to do a throw which
 389   // is coming at the end after block reordering) the return address
 390   // must still point into the code area in order to avoid assertion
 391   // failures when searching for the corresponding bci => add a nop
 392   // (was bug 5/14/1999 - gri)
 393   __ nop();
 394 
 395   // generate code for exception handler
 396   address handler_base = __ start_a_stub(exception_handler_size());
 397   if (handler_base == NULL) {
 398     // not enough space left for the handler
 399     bailout("exception handler overflow");
 400     return -1;
 401   }
 402 
 403   int offset = code_offset();
 404 
 405   // the exception oop and pc are in rax, and rdx
 406   // no other registers need to be preserved, so invalidate them
 407   __ invalidate_registers(false, true, true, false, true, true);
 408 
 409   // check that there is really an exception
 410   __ verify_not_null_oop(rax);
 411 
 412   // search an exception handler (rax: exception oop, rdx: throwing pc)
 413   __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::handle_exception_from_callee_id)));
 414   __ should_not_reach_here();
 415   guarantee(code_offset() - offset <= exception_handler_size(), "overflow");
 416   __ end_a_stub();
 417 
 418   return offset;
 419 }
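
The handler above follows C1's bounded-stub discipline: reserve a maximum-size region, bail out of the compilation if the stub buffer is exhausted, and verify the size estimate with a guarantee. A self-contained toy of that discipline (every name and size here is invented, not HotSpot API):

#include <cassert>
#include <cstdio>

// Toy buffer standing in for the C1 code buffer.
struct ToyBuffer {
  int pos   = 0;
  int limit = 0;
  int start_a_stub(int max_size) { limit = pos + max_size; return pos; }
  void emit_byte()               { pos++; }
  void end_a_stub()              { limit = 0; }
};

int main() {
  ToyBuffer buf;
  const int handler_size = 16;                   // analogue of exception_handler_size()
  int offset = buf.start_a_stub(handler_size);   // the real start_a_stub() returns NULL on overflow -> bailout
  for (int i = 0; i < 7; i++) buf.emit_byte();   // "emit" a short handler body
  assert(buf.pos - offset <= handler_size);      // mirrors the guarantee(...) bound above
  buf.end_a_stub();
  printf("handler at offset %d, %d bytes\n", offset, buf.pos - offset);
  return 0;
}
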
 420 
 421 
 422 // Emit the code to remove the frame from the stack in the exception
 423 // unwind path.
 424 int LIR_Assembler::emit_unwind_handler() {
 425 #ifndef PRODUCT
 426   if (CommentedAssembly) {
 427     _masm->block_comment("Unwind handler");
 428   }
 429 #endif
 430 
 431   int offset = code_offset();
 432 
 433   // Fetch the exception from TLS and clear out exception related thread state
 434   Register thread = NOT_LP64(rsi) LP64_ONLY(r15_thread);
 435   NOT_LP64(__ get_thread(rsi));


 473   __ jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));
 474 
 475   // Emit the slow path assembly
 476   if (stub != NULL) {
 477     stub->emit_code(this);
 478   }
 479 
 480   return offset;
 481 }
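
The thread register selection near the top of this handler (line 434) relies on HotSpot's NOT_LP64/LP64_ONLY conditional-compilation macros; a simplified sketch of how that declaration expands under the two builds:

// Simplified form of the macros behind line 434.
#ifdef _LP64
  #define LP64_ONLY(code) code
  #define NOT_LP64(code)
#else
  #define LP64_ONLY(code)
  #define NOT_LP64(code) code
#endif
// 64-bit expansion:  Register thread = r15_thread;  // the thread pointer is kept in r15
// 32-bit expansion:  Register thread = rsi;         // loaded by __ get_thread(rsi) on the next line
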
 482 
 483 
 484 int LIR_Assembler::emit_deopt_handler() {
 485   // if the last instruction is a call (typically to do a throw which
 486   // is coming at the end after block reordering) the return address
 487   // must still point into the code area in order to avoid assertion
 488   // failures when searching for the corresponding bci => add a nop
 489   // (was bug 5/14/1999 - gri)
 490   __ nop();
 491 
 492   // generate code for exception handler
 493   address handler_base = __ start_a_stub(deopt_handler_size());
 494   if (handler_base == NULL) {
 495     // not enough space left for the handler
 496     bailout("deopt handler overflow");
 497     return -1;
 498   }
 499 
 500   int offset = code_offset();
 501   InternalAddress here(__ pc());
 502 
 503   __ pushptr(here.addr());
 504   __ jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
 505   guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
 506   __ end_a_stub();
 507 
 508   return offset;
 509 }
 510 
 511 
 512 void LIR_Assembler::return_op(LIR_Opr result) {
 513   assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == rax, "word returns are in rax,");
 514   if (!result->is_illegal() && result->is_float_kind() && !result->is_xmm_register()) {
 515     assert(result->fpu() == 0, "result must already be on TOS");
 516   }
 517 
 518   // Pop the stack before the safepoint code
 519   __ remove_frame(initial_frame_size_in_bytes());
 520 
 521   if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
 522     __ reserved_stack_check();
 523   }
 524 
 525   bool result_is_oop = result->is_valid() ? result->is_oop() : false;


2788 }
2789 
2790 
2791 void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
2792   __ ic_call(op->addr());
2793   add_call_info(code_offset(), op->info());
2794   assert(!os::is_MP() ||
2795          (__ offset() - NativeCall::instruction_size + NativeCall::displacement_offset) % BytesPerWord == 0,
2796          "must be aligned");
2797 }
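
The assertion verifies that the 32-bit displacement of the call just emitted starts on a word boundary, so the target can later be patched atomically on multiprocessors. A worked instance of the arithmetic, assuming the usual x86 NativeCall layout (5-byte 0xE8 call with the displacement one byte in) and an invented end-of-call offset:

#include <cassert>

int main() {
  const int instruction_size    = 5;   // assumed NativeCall::instruction_size (0xE8 + imm32)
  const int displacement_offset = 1;   // assumed NativeCall::displacement_offset
  const int BytesPerWord        = 8;   // LP64; 4 on a 32-bit build
  int end_of_call = 44;                // hypothetical __ offset() right after the call
  // The call starts at 44 - 5 = 39, so its displacement starts at 39 + 1 = 40,
  // a multiple of 8, and can be rewritten with a single aligned store.
  assert((end_of_call - instruction_size + displacement_offset) % BytesPerWord == 0);
  return 0;
}
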
2798 
2799 
2800 /* Currently, vtable-dispatch is only enabled for sparc platforms */
2801 void LIR_Assembler::vtable_call(LIR_OpJavaCall* op) {
2802   ShouldNotReachHere();
2803 }
2804 
2805 
2806 void LIR_Assembler::emit_static_call_stub() {
2807   address call_pc = __ pc();
2808   address stub = __ start_a_stub(call_stub_size());
2809   if (stub == NULL) {
2810     bailout("static call stub overflow");
2811     return;
2812   }
2813 
2814   int start = __ offset();
2815   if (os::is_MP()) {
2816     // make sure that the displacement word of the call ends up word aligned
2817     __ align(BytesPerWord, __ offset() + NativeMovConstReg::instruction_size + NativeCall::displacement_offset);
2818   }
2819   __ relocate(static_stub_Relocation::spec(call_pc, false /* is_aot */));
2820   __ mov_metadata(rbx, (Metadata*)NULL);
2821   // must be set to -1 at code generation time
2822   assert(!os::is_MP() || ((__ offset() + 1) % BytesPerWord) == 0, "must be aligned on MP");
2823   // On 64bit this will die since it will take a movq & jmp, must be only a jmp
2824   __ jump(RuntimeAddress(__ pc()));
2825 
2826   if (UseAOT) {
2827     // Trampoline to aot code
2828     __ relocate(static_stub_Relocation::spec(call_pc, true /* is_aot */));
2829 #ifdef _LP64
2830     __ mov64(rax, CONST64(0));  // address is zapped till fixup time.
2831 #else
2832     __ movl(rax, 0xdeadffff);  // address is zapped till fixup time.
2833 #endif
2834     __ jmp(rax);
2835   }
2836   assert(__ offset() - start <= call_stub_size(), "stub too big");
2837   __ end_a_stub();
2838 }
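
The align() at line 2817 pads the stub so that the displacement word of the trailing jmp (the slot later patched with the resolved entry point) lands on a word boundary, which the assert at line 2822 re-checks after the mov_metadata. A standalone sketch of that arithmetic, with assumed LP64 instruction sizes (10-byte mov r64,imm64 for NativeMovConstReg, displacement one byte into the jmp) and an invented starting offset:

#include <cassert>
#include <cstdio>

int main() {
  const int BytesPerWord        = 8;
  const int movconst_size       = 10;  // assumed NativeMovConstReg::instruction_size on LP64
  const int displacement_offset = 1;   // assumed NativeCall::displacement_offset
  int offset = 43;                      // hypothetical __ offset() when align() runs

  // __ align(BytesPerWord, offset + movconst_size + displacement_offset):
  // pad with nops until the future displacement position is word aligned.
  while ((offset + movconst_size + displacement_offset) % BytesPerWord != 0) offset++;

  offset += movconst_size;              // mov_metadata(rbx, (Metadata*)NULL) emitted
  assert((offset + displacement_offset) % BytesPerWord == 0);  // the check at line 2822
  printf("jmp displacement at offset %d\n", offset + displacement_offset);  // prints 56 here
  return 0;
}
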
2839 
2840 
2841 void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
2842   assert(exceptionOop->as_register() == rax, "must match");
2843   assert(exceptionPC->as_register() == rdx, "must match");
2844 
2845   // exception object is not added to oop map by LinearScan
2846   // (LinearScan assumes that no oops are in fixed registers)
2847   info->add_register_oop(exceptionOop);
2848   Runtime1::StubID unwind_id;
2849 
2850   // get current pc information
2851   // pc is only needed if the method has an exception handler, the unwind code does not need it.
2852   int pc_for_athrow_offset = __ offset();
2853   InternalAddress pc_for_athrow(__ pc());
2854   __ lea(exceptionPC->as_register(), pc_for_athrow);
2855   add_call_info(pc_for_athrow_offset, info); // for exception handler
2856 

