src/cpu/x86/vm/c1_LIRAssembler_x86.cpp (webrev Sdiff, 6919934)

(old version)

 419 
 420 
 421 int LIR_Assembler::emit_exception_handler() {
 422   // if the last instruction is a call (typically to do a throw which
 423   // is coming at the end after block reordering) the return address
 424   // must still point into the code area in order to avoid assertion
 425   // failures when searching for the corresponding bci => add a nop
 426   // (was bug 5/14/1999 - gri)
 427   __ nop();
 428 
 429   // generate code for exception handler
 430   address handler_base = __ start_a_stub(exception_handler_size);
 431   if (handler_base == NULL) {
 432     // not enough space left for the handler
 433     bailout("exception handler overflow");
 434     return -1;
 435   }
 436 
 437   int offset = code_offset();
 438 
 439   // if the method does not have an exception handler, then there is
 440   // no reason to search for one
 441   if (compilation()->has_exception_handlers() || compilation()->env()->jvmti_can_post_on_exceptions()) {
 442     // the exception oop and pc are in rax and rdx
 443     // no other registers need to be preserved, so invalidate them
 444     __ invalidate_registers(false, true, true, false, true, true);
 445 
 446     // check that there is really an exception
 447     __ verify_not_null_oop(rax);
 448 
 449     // search for an exception handler (rax: exception oop, rdx: throwing pc)
 450     __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::handle_exception_nofpu_id)));
 451 
 452     // if the call returns here, then the exception handler for the particular
 453     // exception doesn't exist -> unwind the activation and forward the exception to the caller
 454   }
 455 
 456   // the exception oop is in rax,
 457   // no other registers need to be preserved, so invalidate them
 458   __ invalidate_registers(false, true, true, true, true, true);
 459 
 460   // check that there is really an exception
 461   __ verify_not_null_oop(rax);
 462 
 463   // unlock the receiver/klass if necessary
 464   // rax: exception
 465   ciMethod* method = compilation()->method();
 466   if (method->is_synchronized() && GenerateSynchronizationCode) {
 467     monitorexit(FrameMap::rbx_oop_opr, FrameMap::rcx_opr, SYNC_header, 0, rax);
 468   }
 469 
 470   // unwind activation and forward exception to caller
 471   // rax: exception
 472   __ jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));
 473   assert(code_offset() - offset <= exception_handler_size, "overflow");
 474   __ end_a_stub();
 475 
 476   return offset;
 477 }
 478 
 479 
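The nop comment at the top of this handler is worth unpacking: if the stub begins right after a call that ends the method, the call's return address equals the first byte past the method's code, and the pc-to-bci search rejects it. A toy model of that range check, with illustrative bounds and names (not HotSpot's real data structures):

    #include <cstdio>

    // Toy model of why a nop is emitted before the handler: a return address
    // from a call that is the method's last instruction points one byte past
    // the code area, so a [code_begin, code_end) check fails.
    struct ToyMethod {
        int code_begin = 0x1000;
        int code_size  = 0x40;
        bool pc_in_code(int pc) const {
            return pc >= code_begin && pc < code_begin + code_size;
        }
    };

    int main() {
        ToyMethod m;
        int ret_addr = m.code_begin + m.code_size;   // one past the end
        printf("without nop: pc in code? %s\n",
               m.pc_in_code(ret_addr) ? "yes" : "no");   // no -> assertion failure
        m.code_size += 1;   // the emitted nop grows the code area by one byte
        printf("with nop:    pc in code? %s\n",
               m.pc_in_code(ret_addr) ? "yes" : "no");   // yes -> bci search works
    }
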
 480 int LIR_Assembler::emit_deopt_handler() {
 481   // if the last instruction is a call (typically to do a throw which
 482   // is coming at the end after block reordering) the return address
 483   // must still point into the code area in order to avoid assertion
 484   // failures when searching for the corresponding bci => add a nop
 485   // (was bug 5/14/1999 - gri)
 486   __ nop();
 487 
 488   // generate code for deopt handler
 489   address handler_base = __ start_a_stub(deopt_handler_size);
 490   if (handler_base == NULL) {
 491     // not enough space left for the handler
 492     bailout("deopt handler overflow");
 493     return -1;
 494   }
 495 
 496   int offset = code_offset();
 497   InternalAddress here(__ pc());
 498   __ pushptr(here.addr());
 499   __ jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
 500   assert(code_offset() - offset <= deopt_handler_size, "overflow");
 501   __ end_a_stub();
 502 
 503   return offset;
 504 }
 505 
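The pushptr/jump pair above deserves a note: it fabricates a call frame by pushing the handler's own pc and then jumping to the deopt blob, so the blob finds a return address that points back into this compiled method. A toy simulation of that sequence (a hypothetical ToyCpu, not real machine state):

    #include <cstdio>
    #include <cstdint>

    // Toy model of the deopt-handler entry: push(pc); jmp target behaves
    // like a call, except the pushed "return address" is the push site.
    struct ToyCpu {
        uint64_t pc = 0;
        uint64_t stack[16];
        int      sp = 16;                    // empty descending stack
        void push(uint64_t v) { stack[--sp] = v; }
        void jmp(uint64_t target) { pc = target; }
    };

    int main() {
        ToyCpu cpu;
        cpu.pc = 0x1000;                     // 'here': address inside the deopt stub
        cpu.push(cpu.pc);                    // __ pushptr(here.addr())
        cpu.jmp(0x2000);                     // __ jump(deopt_blob()->unpack())
        std::printf("blob entered at pc=0x%llx, return address on stack=0x%llx\n",
                    (unsigned long long)cpu.pc,
                    (unsigned long long)cpu.stack[cpu.sp]);
    }
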
 506 
 507 // This is the fast version of java.lang.String.compare; it has no
 508 // OSR entry and therefore we generate a slow version for OSRs
 509 void LIR_Assembler::emit_string_compare(LIR_Opr arg0, LIR_Opr arg1, LIR_Opr dst, CodeEmitInfo* info) {
 510   __ movptr (rbx, rcx); // receiver is in rcx
 511   __ movptr (rax, arg1->as_register());
 512 
 513   // Get addresses of first characters from both Strings
 514   __ movptr (rsi, Address(rax, java_lang_String::value_offset_in_bytes()));
 515   __ movptr (rcx, Address(rax, java_lang_String::offset_offset_in_bytes()));
 516   __ lea    (rsi, Address(rsi, rcx, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
 517 
 518 
 519   // rbx may be NULL


 576 
 577   // strings are equal up to min length
 578 
 579   __ bind(noLoop);
 580   __ pop(rax);
 581   return_op(LIR_OprFact::illegalOpr);
 582 
 583   __ bind(haveResult);
 584   // the leave instruction will discard the TOS value
 585   __ mov (rax, rcx); // the result is returned in rax
 586 }
 587 
 588 
 589 void LIR_Assembler::return_op(LIR_Opr result) {
 590   assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == rax, "word returns are in rax");
 591   if (!result->is_illegal() && result->is_float_kind() && !result->is_xmm_register()) {
 592     assert(result->fpu() == 0, "result must already be on TOS");
 593   }
 594 
 595   // Pop the stack before the safepoint code
 596   __ leave();
 597 
 598   bool result_is_oop = result->is_valid() ? result->is_oop() : false;
 599 
 600   // Note: we do not need to round the double result; the float result already has the right precision
 601   // the poll sets the condition code, but no data registers
 602   AddressLiteral polling_page(os::get_polling_page() + (SafepointPollOffset % os::vm_page_size()),
 603                               relocInfo::poll_return_type);
 604 
 605   // NOTE: this requires that the polling page be reachable; otherwise the reloc
 606   // is attached to the movq that loads the address rather than the faulting
 607   // instruction, which breaks the signal handler code
 608 
 609   __ test32(rax, polling_page);
 610 
 611   __ ret(0);
 612 }
 613 
 614 
 615 int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
 616   AddressLiteral polling_page(os::get_polling_page() + (SafepointPollOffset % os::vm_page_size()),


2721       __ bind(done);
2722 
2723 #else
2724     __ lcmp2int(left->as_register_hi(),
2725                 left->as_register_lo(),
2726                 right->as_register_hi(),
2727                 right->as_register_lo());
2728     move_regs(left->as_register_hi(), dst->as_register());
2729 #endif // _LP64
2730   }
2731 }
2732 
2733 
2734 void LIR_Assembler::align_call(LIR_Code code) {
2735   if (os::is_MP()) {
2736     // make sure that the displacement word of the call ends up word aligned
2737     int offset = __ offset();
2738     switch (code) {
2739       case lir_static_call:
2740       case lir_optvirtual_call:
2741         offset += NativeCall::displacement_offset;
2742         break;
2743       case lir_icvirtual_call:
2744         offset += NativeCall::displacement_offset + NativeMovConstReg::instruction_size;
2745         break;
2746       case lir_virtual_call:  // currently, sparc-specific for niagara
2747       default: ShouldNotReachHere();
2748     }
2749     while (offset++ % BytesPerWord != 0) {
2750       __ nop();
2751     }
2752   }
2753 }
2754 
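The padding loop in align_call can be modeled directly. The sketch below assumes displacement_offset = 1 (one opcode byte before a near call's 4-byte displacement) and an 8-byte word; both values are illustrative, so check NativeCall on your platform:

    #include <cstdio>

    // Sketch of align_call's padding computation: nops are emitted until the
    // 4-byte call displacement begins on a word boundary, so it never
    // straddles a word and can be patched atomically on MP systems.
    int nops_needed(int call_offset, int displacement_offset, int bytes_per_word) {
        int offset = call_offset + displacement_offset;
        int nops = 0;
        while (offset++ % bytes_per_word != 0)  // same loop shape as align_call
            nops++;
        return nops;
    }

    int main() {
        for (int off = 0; off < 8; off++)
            printf("call emitted at offset %d -> pad with %d nop(s)\n",
                   off, nops_needed(off, 1, 8));
    }
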
2755 
2756 void LIR_Assembler::call(address entry, relocInfo::relocType rtype, CodeEmitInfo* info) {
2757   assert(!os::is_MP() || (__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0,
2758          "must be aligned");
2759   __ call(AddressLiteral(entry, rtype));
2760   add_call_info(code_offset(), info);
2761 }
2762 
2763 
2764 void LIR_Assembler::ic_call(address entry, CodeEmitInfo* info) {
2765   RelocationHolder rh = virtual_call_Relocation::spec(pc());
2766   __ movoop(IC_Klass, (jobject)Universe::non_oop_word());
2767   assert(!os::is_MP() ||
2768          (__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0,
2769          "must be aligned");
2770   __ call(AddressLiteral(entry, rh));
2771   add_call_info(code_offset(), info);
2772 }
2773 
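ic_call seeds the inline cache with Universe::non_oop_word(), a sentinel that can never equal a real klass, so the first invocation is guaranteed to miss and reach the runtime, which then patches both the movoop constant and the call target. A toy model of that protocol (all names and the dispatch logic below are illustrative, not HotSpot's):

    #include <cstdio>

    // Toy inline cache: the call site carries a patchable klass constant,
    // initialized to a sentinel so the first call always resolves.
    using Klass = int;
    constexpr Klass NON_OOP_WORD = -1;          // stand-in for non_oop_word()

    struct CallSite {
        Klass cached_klass = NON_OOP_WORD;      // movoop(IC_Klass, sentinel)
        void (*target)() = nullptr;             // call displacement, patched later
    };

    static void fast_callee() { puts("fast path: monomorphic call"); }

    static void resolve(CallSite& site, Klass receiver) {
        puts("miss: runtime resolves the callee and patches the site");
        site.cached_klass = receiver;
        site.target = fast_callee;
    }

    static void invoke(CallSite& site, Klass receiver) {
        if (site.cached_klass == receiver) site.target(); // IC hit
        else resolve(site, receiver);                     // IC miss
    }

    int main() {
        CallSite site;
        Klass string_klass = 42;
        invoke(site, string_klass);  // first call: sentinel forces resolution
        invoke(site, string_klass);  // second call: patched fast path
    }
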
2774 
2775 /* Currently, vtable-dispatch is only enabled for sparc platforms */
2776 void LIR_Assembler::vtable_call(int vtable_offset, CodeEmitInfo* info) {
2777   ShouldNotReachHere();
2778 }
2779 
2780 void LIR_Assembler::emit_static_call_stub() {
2781   address call_pc = __ pc();
2782   address stub = __ start_a_stub(call_stub_size);
2783   if (stub == NULL) {
2784     bailout("static call stub overflow");
2785     return;
2786   }
2787 
2788   int start = __ offset();
2789   if (os::is_MP()) {
2790     // make sure that the displacement word of the call ends up word aligned
2791     int offset = __ offset() + NativeMovConstReg::instruction_size + NativeCall::displacement_offset;
2792     while (offset++ % BytesPerWord != 0) {
2793       __ nop();
2794     }
2795   }
2796   __ relocate(static_stub_Relocation::spec(call_pc));
2797   __ movoop(rbx, (jobject)NULL);
2798   // must be set to -1 at code generation time
2799   assert(!os::is_MP() || ((__ offset() + 1) % BytesPerWord) == 0, "must be aligned on MP");


2812   // exception object is not added to oop map by LinearScan
2813   // (LinearScan assumes that no oops are in fixed registers)
2814   info->add_register_oop(exceptionOop);
2815   Runtime1::StubID unwind_id;
2816 
2817   if (!unwind) {
2818     // get current pc information
2819     // pc is only needed if the method has an exception handler; the unwind code does not need it.
2820     int pc_for_athrow_offset = __ offset();
2821     InternalAddress pc_for_athrow(__ pc());
2822     __ lea(exceptionPC->as_register(), pc_for_athrow);
2823     add_call_info(pc_for_athrow_offset, info); // for exception handler
2824 
2825     __ verify_not_null_oop(rax);
2826     // search for an exception handler (rax: exception oop, rdx: throwing pc)
2827     if (compilation()->has_fpu_code()) {
2828       unwind_id = Runtime1::handle_exception_id;
2829     } else {
2830       unwind_id = Runtime1::handle_exception_nofpu_id;
2831     }
2832   } else {
2833     unwind_id = Runtime1::unwind_exception_id;
2834   }
2835   __ call(RuntimeAddress(Runtime1::entry_for(unwind_id)));
2836 
2837   // enough room for two byte trap
2838   __ nop();
2839 }
2840 
2841 
2842 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
2843 
2844   // optimized version for linear scan:
2845   // * count must be already in ECX (guaranteed by LinearScan)
2846   // * left and dest must be equal
2847   // * tmp must be unused
2848   assert(count->as_register() == SHIFT_count, "count must be in ECX");
2849   assert(left == dest, "left and dest must be equal");
2850   assert(tmp->is_illegal(), "wasting a register if tmp is allocated");
2851 
2852   if (left->is_single_cpu()) {
2853     Register value = left->as_register();
2854     assert(value != SHIFT_count, "left cannot be ECX");
2855 

(new version)

 419 
 420 
 421 int LIR_Assembler::emit_exception_handler() {
 422   // if the last instruction is a call (typically to do a throw which
 423   // is coming at the end after block reordering) the return address
 424   // must still point into the code area in order to avoid assertion
 425   // failures when searching for the corresponding bci => add a nop
 426   // (was bug 5/14/1999 - gri)
 427   __ nop();
 428 
 429   // generate code for exception handler
 430   address handler_base = __ start_a_stub(exception_handler_size);
 431   if (handler_base == NULL) {
 432     // not enough space left for the handler
 433     bailout("exception handler overflow");
 434     return -1;
 435   }
 436 
 437   int offset = code_offset();
 438 
 439   // the exception oop and pc are in rax and rdx
 440   // no other registers need to be preserved, so invalidate them
 441   __ invalidate_registers(false, true, true, false, true, true);
 442 
 443   // check that there is really an exception
 444   __ verify_not_null_oop(rax);
 445 
 446   // search for an exception handler (rax: exception oop, rdx: throwing pc)
 447   __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::handle_exception_nofpu_id)));
 448 
 449   __ stop("should not reach here");
 450 
 451   assert(code_offset() - offset <= exception_handler_size, "overflow");
 452   __ end_a_stub();
 453 
 454   return offset;
 455 }
 456 
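Both handler emitters follow the same start_a_stub/bailout pattern: stub code is carved from a fixed-size buffer, and when the remaining space cannot hold the handler the whole compilation bails out rather than overflowing. A minimal sketch of that pattern, with invented buffer and handler sizes:

    #include <cstdio>
    #include <cstring>

    // Sketch of the start_a_stub / bailout pattern; sizes are illustrative.
    struct StubBuffer {
        char  buf[64];
        int   used = 0;
        char* start_a_stub(int size) {
            if (used + size > (int)sizeof(buf)) return nullptr; // no space left
            char* base = buf + used;
            used += size;
            return base;
        }
    };

    int emit_exception_handler(StubBuffer& cb, int exception_handler_size) {
        char* handler_base = cb.start_a_stub(exception_handler_size);
        if (handler_base == nullptr) {
            printf("bailout: exception handler overflow\n");
            return -1;                     // compilation is abandoned
        }
        int offset = (int)(handler_base - cb.buf);
        memset(handler_base, 0x90, exception_handler_size); // nops stand in for code
        return offset;
    }

    int main() {
        StubBuffer cb;
        printf("first handler at offset %d\n", emit_exception_handler(cb, 48));
        printf("second handler at offset %d\n", emit_exception_handler(cb, 48)); // -1
    }
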
 457 
 458 int LIR_Assembler::emit_deopt_handler() {
 459   // if the last instruction is a call (typically to do a throw which
 460   // is coming at the end after block reordering) the return address
 461   // must still point into the code area in order to avoid assertion
 462   // failures when searching for the corresponding bci => add a nop
 463   // (was bug 5/14/1999 - gri)
 464   __ nop();
 465 
 466   // generate code for deopt handler
 467   address handler_base = __ start_a_stub(deopt_handler_size);
 468   if (handler_base == NULL) {
 469     // not enough space left for the handler
 470     bailout("deopt handler overflow");
 471     return -1;
 472   }
 473 
 474   int offset = code_offset();
 475   InternalAddress here(__ pc());
 476 
 477   __ pushptr(here.addr());
 478   __ jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
 479 
 480   assert(code_offset() - offset <= deopt_handler_size, "overflow");
 481   __ end_a_stub();
 482 
 483   return offset;
 484 }
 485 
 486 
 487 // This is the fast version of java.lang.String.compare; it has no
 488 // OSR entry and therefore we generate a slow version for OSRs
 489 void LIR_Assembler::emit_string_compare(LIR_Opr arg0, LIR_Opr arg1, LIR_Opr dst, CodeEmitInfo* info) {
 490   __ movptr (rbx, rcx); // receiver is in rcx
 491   __ movptr (rax, arg1->as_register());
 492 
 493   // Get addresses of first characters from both Strings
 494   __ movptr (rsi, Address(rax, java_lang_String::value_offset_in_bytes()));
 495   __ movptr (rcx, Address(rax, java_lang_String::offset_offset_in_bytes()));
 496   __ lea    (rsi, Address(rsi, rcx, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
 497 
 498 
 499   // rbx may be NULL


 556 
 557   // strings are equal up to min length
 558 
 559   __ bind(noLoop);
 560   __ pop(rax);
 561   return_op(LIR_OprFact::illegalOpr);
 562 
 563   __ bind(haveResult);
 564   // the leave instruction will discard the TOS value
 565   __ mov (rax, rcx); // the result is returned in rax
 566 }
 567 
 568 
 569 void LIR_Assembler::return_op(LIR_Opr result) {
 570   assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == rax, "word returns are in rax");
 571   if (!result->is_illegal() && result->is_float_kind() && !result->is_xmm_register()) {
 572     assert(result->fpu() == 0, "result must already be on TOS");
 573   }
 574 
 575   // Pop the stack before the safepoint code
 576   __ remove_frame(initial_frame_size_in_bytes());
 577 
 578   bool result_is_oop = result->is_valid() ? result->is_oop() : false;
 579 
 580   // Note: we do not need to round the double result; the float result already has the right precision
 581   // the poll sets the condition code, but no data registers
 582   AddressLiteral polling_page(os::get_polling_page() + (SafepointPollOffset % os::vm_page_size()),
 583                               relocInfo::poll_return_type);
 584 
 585   // NOTE: this requires that the polling page be reachable; otherwise the reloc
 586   // is attached to the movq that loads the address rather than the faulting
 587   // instruction, which breaks the signal handler code
 588 
 589   __ test32(rax, polling_page);
 590 
 591   __ ret(0);
 592 }
 593 
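The test32(rax, polling_page) above is the whole safepoint poll: an ordinary read of a dedicated page. Arming a safepoint means protecting that page, so the read faults and the signal handler parks the thread; this is also why the comment insists the reloc must land on the faulting instruction. A POSIX-only sketch of the mechanism, with a stand-in signal handler (longjmp out of SIGSEGV is acceptable only as a demo):

    #include <cstdio>
    #include <signal.h>
    #include <setjmp.h>
    #include <sys/mman.h>
    #include <unistd.h>

    // Sketch of safepoint polling: reads of the poll page are free until the
    // VM protects the page, at which point the read faults. Not HotSpot code.
    static sigjmp_buf jb;
    static void on_fault(int) { siglongjmp(jb, 1); }

    int main() {
        long page = sysconf(_SC_PAGESIZE);
        volatile int* poll = (volatile int*)mmap(nullptr, page, PROT_READ,
                                                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        signal(SIGSEGV, on_fault);

        int sink = *poll;                       // safepoint not armed: falls through
        (void)sink;
        puts("poll passed, method returns normally");

        mprotect((void*)poll, page, PROT_NONE); // VM "arms" the safepoint
        if (sigsetjmp(jb, 1) == 0) {
            sink = *poll;                       // faults at the poll instruction
        } else {
            puts("poll faulted -> thread would block at the safepoint");
        }
        return 0;
    }
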
 594 
 595 int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
 596   AddressLiteral polling_page(os::get_polling_page() + (SafepointPollOffset % os::vm_page_size()),


2701       __ bind(done);
2702 
2703 #else
2704     __ lcmp2int(left->as_register_hi(),
2705                 left->as_register_lo(),
2706                 right->as_register_hi(),
2707                 right->as_register_lo());
2708     move_regs(left->as_register_hi(), dst->as_register());
2709 #endif // _LP64
2710   }
2711 }
2712 
2713 
2714 void LIR_Assembler::align_call(LIR_Code code) {
2715   if (os::is_MP()) {
2716     // make sure that the displacement word of the call ends up word aligned
2717     int offset = __ offset();
2718     switch (code) {
2719       case lir_static_call:
2720       case lir_optvirtual_call:
2721       case lir_dynamic_call:
2722         offset += NativeCall::displacement_offset;
2723         break;
2724       case lir_icvirtual_call:
2725         offset += NativeCall::displacement_offset + NativeMovConstReg::instruction_size;
2726         break;
2727       case lir_virtual_call:  // currently, sparc-specific for niagara
2728       default: ShouldNotReachHere();
2729     }
2730     while (offset++ % BytesPerWord != 0) {
2731       __ nop();
2732     }
2733   }
2734 }
2735 
2736 
2737 void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
2738   assert(!os::is_MP() || (__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0,
2739          "must be aligned");
2740   __ call(AddressLiteral(op->addr(), rtype));
2741   add_call_info(code_offset(), op->info(), op->is_method_handle_invoke());
2742 }
2743 
2744 
2745 void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
2746   RelocationHolder rh = virtual_call_Relocation::spec(pc());
2747   __ movoop(IC_Klass, (jobject)Universe::non_oop_word());
2748   assert(!os::is_MP() ||
2749          (__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0,
2750          "must be aligned");
2751   __ call(AddressLiteral(op->addr(), rh));
2752   add_call_info(code_offset(), op->info(), op->is_method_handle_invoke());
2753 }
2754 
2755 
2756 /* Currently, vtable-dispatch is only enabled for sparc platforms */
2757 void LIR_Assembler::vtable_call(LIR_OpJavaCall* op) {
2758   ShouldNotReachHere();
2759 }
2760 
2761 
2762 void LIR_Assembler::preserve_SP() {
2763   __ movptr(rbp, rsp);
2764 }
2765 
2766 
2767 void LIR_Assembler::restore_SP() {
2768   __ movptr(rsp, rbp);
2769 }
2770 
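preserve_SP/restore_SP bracket invokes whose callee may return with a different stack pointer (method-handle adapters); the caller parks rsp in rbp across the call and restores it afterwards. A toy model with a plain struct standing in for the registers (purely illustrative):

    #include <cstdio>

    // Toy model of preserve_SP/restore_SP around a method-handle invoke.
    struct ToyFrame {
        int sp  = 100;
        int rbp = 0;
        void preserve_SP() { rbp = sp; }   // movptr(rbp, rsp)
        void restore_SP()  { sp  = rbp; }  // movptr(rsp, rbp)
    };

    int main() {
        ToyFrame f;
        f.preserve_SP();
        f.sp -= 24;                        // callee reshuffles args, moves SP
        printf("after call: sp=%d\n", f.sp);
        f.restore_SP();
        printf("restored:   sp=%d\n", f.sp);   // back to 100
    }
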
2771 
2772 void LIR_Assembler::emit_static_call_stub() {
2773   address call_pc = __ pc();
2774   address stub = __ start_a_stub(call_stub_size);
2775   if (stub == NULL) {
2776     bailout("static call stub overflow");
2777     return;
2778   }
2779 
2780   int start = __ offset();
2781   if (os::is_MP()) {
2782     // make sure that the displacement word of the call ends up word aligned
2783     int offset = __ offset() + NativeMovConstReg::instruction_size + NativeCall::displacement_offset;
2784     while (offset++ % BytesPerWord != 0) {
2785       __ nop();
2786     }
2787   }
2788   __ relocate(static_stub_Relocation::spec(call_pc));
2789   __ movoop(rbx, (jobject)NULL);
2790   // must be set to -1 at code generation time
2791   assert(!os::is_MP() || ((__ offset() + 1) % BytesPerWord) == 0, "must be aligned on MP");


2804   // exception object is not added to oop map by LinearScan
2805   // (LinearScan assumes that no oops are in fixed registers)
2806   info->add_register_oop(exceptionOop);
2807   Runtime1::StubID unwind_id;
2808 
2809   if (!unwind) {
2810     // get current pc information
2811     // pc is only needed if the method has an exception handler; the unwind code does not need it.
2812     int pc_for_athrow_offset = __ offset();
2813     InternalAddress pc_for_athrow(__ pc());
2814     __ lea(exceptionPC->as_register(), pc_for_athrow);
2815     add_call_info(pc_for_athrow_offset, info); // for exception handler
2816 
2817     __ verify_not_null_oop(rax);
2818     // search for an exception handler (rax: exception oop, rdx: throwing pc)
2819     if (compilation()->has_fpu_code()) {
2820       unwind_id = Runtime1::handle_exception_id;
2821     } else {
2822       unwind_id = Runtime1::handle_exception_nofpu_id;
2823     }
2824     __ call(RuntimeAddress(Runtime1::entry_for(unwind_id)));
2825   } else {
2826     // remove the activation
2827     __ remove_frame(initial_frame_size_in_bytes());
2828     __ jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));
2829   }
2830 
2831   // enough room for two byte trap
2832   __ nop();
2833 }
2834 
2835 
2836 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
2837 
2838   // optimized version for linear scan:
2839   // * count must be already in ECX (guaranteed by LinearScan)
2840   // * left and dest must be equal
2841   // * tmp must be unused
2842   assert(count->as_register() == SHIFT_count, "count must be in ECX");
2843   assert(left == dest, "left and dest must be equal");
2844   assert(tmp->is_illegal(), "wasting a register if tmp is allocated");
2845 
2846   if (left->is_single_cpu()) {
2847     Register value = left->as_register();
2848     assert(value != SHIFT_count, "left cannot be ECX");
2849 
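The SHIFT_count assertion reflects a hard ISA constraint: x86 variable shifts (SHL/SHR/SAR r/m, cl) take their count only in CL, so LinearScan must pin the count to ECX and keep the shifted value out of it. A GCC/Clang x86-only sketch using the "c" constraint to force the count into ecx:

    #include <cstdio>

    // x86 variable shift with the count in CL, as the register allocator
    // guarantees for LIR shift ops. GCC/Clang extended asm, x86 only.
    unsigned shl_via_cl(unsigned value, unsigned count) {
        asm("shll %%cl, %0" : "+r"(value) : "c"(count) : "cc");
        return value;
    }

    int main() {
        printf("3 << 4 = %u\n", shl_via_cl(3, 4));   // prints 48
    }
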

