src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp

1890         __ bind(failure);
1891         __ xorptr(dst, dst);
1892         __ jmpb(done);
1893         __ bind(success);
1894         __ movptr(dst, 1);
1895         __ bind(done);
1896       } else {
1897         ShouldNotReachHere();
1898       }
1899 
1900 }
1901 
1902 
1903 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
1904   if (LP64_ONLY(false &&) op->code() == lir_cas_long && VM_Version::supports_cx8()) {
1905     assert(op->cmp_value()->as_register_lo() == rax, "wrong register");
1906     assert(op->cmp_value()->as_register_hi() == rdx, "wrong register");
1907     assert(op->new_value()->as_register_lo() == rbx, "wrong register");
1908     assert(op->new_value()->as_register_hi() == rcx, "wrong register");
1909     Register addr = op->addr()->as_register();
1910     if (os::is_MP()) {
1911       __ lock();
1912     }
1913     NOT_LP64(__ cmpxchg8(Address(addr, 0)));
1914 
1915   } else if (op->code() == lir_cas_int || op->code() == lir_cas_obj ) {
1916     NOT_LP64(assert(op->addr()->is_single_cpu(), "must be single");)
1917     Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());
1918     Register newval = op->new_value()->as_register();
1919     Register cmpval = op->cmp_value()->as_register();
1920     assert(cmpval == rax, "wrong register");
1921     assert(newval != NULL, "new val must be register");
1922     assert(cmpval != newval, "cmp and new values must be in different registers");
1923     assert(cmpval != addr, "cmp and addr must be in different registers");
1924     assert(newval != addr, "new value and addr must be in different registers");
1925 
1926     if ( op->code() == lir_cas_obj) {
1927 #ifdef _LP64
1928       if (UseCompressedOops) {
1929         __ encode_heap_oop(cmpval);
1930         __ mov(rscratch1, newval);
1931         __ encode_heap_oop(rscratch1);
1932         if (os::is_MP()) {
1933           __ lock();
1934         }
1935         // cmpval (rax) is implicitly used by this instruction
1936         __ cmpxchgl(rscratch1, Address(addr, 0));
1937       } else
1938 #endif
1939       {
1940         if (os::is_MP()) {
1941           __ lock();
1942         }
1943         __ cmpxchgptr(newval, Address(addr, 0));
1944       }
1945     } else {
1946       assert(op->code() == lir_cas_int, "lir_cas_int expected");
1947       if (os::is_MP()) {
1948         __ lock();
1949       }
1950       __ cmpxchgl(newval, Address(addr, 0));
1951     }
1952 #ifdef _LP64
1953   } else if (op->code() == lir_cas_long) {
1954     Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());
1955     Register newval = op->new_value()->as_register_lo();
1956     Register cmpval = op->cmp_value()->as_register_lo();
1957     assert(cmpval == rax, "wrong register");
1958     assert(newval != NULL, "new val must be register");
1959     assert(cmpval != newval, "cmp and new values must be in different registers");
1960     assert(cmpval != addr, "cmp and addr must be in different registers");
1961     assert(newval != addr, "new value and addr must be in different registers");
1962     if (os::is_MP()) {
1963       __ lock();
1964     }
1965     __ cmpxchgq(newval, Address(addr, 0));
1966 #endif // _LP64
1967   } else {
1968     Unimplemented();
1969   }
1970 }
1971 
1972 void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type) {
1973   Assembler::Condition acond, ncond;
1974   switch (condition) {
1975     case lir_cond_equal:        acond = Assembler::equal;        ncond = Assembler::notEqual;     break;
1976     case lir_cond_notEqual:     acond = Assembler::notEqual;     ncond = Assembler::equal;        break;
1977     case lir_cond_less:         acond = Assembler::less;         ncond = Assembler::greaterEqual; break;
1978     case lir_cond_lessEqual:    acond = Assembler::lessEqual;    ncond = Assembler::greater;      break;
1979     case lir_cond_greaterEqual: acond = Assembler::greaterEqual; ncond = Assembler::less;         break;
1980     case lir_cond_greater:      acond = Assembler::greater;      ncond = Assembler::lessEqual;    break;
1981     case lir_cond_belowEqual:   acond = Assembler::belowEqual;   ncond = Assembler::above;        break;
1982     case lir_cond_aboveEqual:   acond = Assembler::aboveEqual;   ncond = Assembler::below;        break;
1983     default:                    acond = Assembler::equal;        ncond = Assembler::notEqual;
1984                                 ShouldNotReachHere();
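
As background for the emit_compare_and_swap lowering above: the lock-prefixed cmpxchg it emits has the same observable semantics as a strong compare-exchange, with rax playing the role of the expected value. The C++ sketch below is an analogy only, not HotSpot code; the function name and the std::atomic mapping are illustrative.

#include <atomic>
#include <cstdint>

// Analogy for "lock cmpxchgl newval, [addr]": compare the value at [addr]
// with rax (cmpval); on a match store newval, otherwise load the observed
// value into rax. ZF then reports success or failure.
bool cas_int_semantics(std::atomic<int32_t>* addr,
                       int32_t& cmpval /* plays the role of rax */,
                       int32_t newval) {
  // On failure, compare_exchange_strong rewrites cmpval with the observed
  // value, mirroring how cmpxchg rewrites rax when the compare fails.
  return addr->compare_exchange_strong(cmpval, newval);
}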


2779     Label done;
2780     Register dest = dst->as_register();
2781     __ cmpptr(left->as_register_lo(), right->as_register_lo());
2782     __ movl(dest, -1);
2783     __ jccb(Assembler::less, done);
2784     __ set_byte_if_not_zero(dest);
2785     __ movzbl(dest, dest);
2786     __ bind(done);
2787 #else
2788     __ lcmp2int(left->as_register_hi(),
2789                 left->as_register_lo(),
2790                 right->as_register_hi(),
2791                 right->as_register_lo());
2792     move_regs(left->as_register_hi(), dst->as_register());
2793 #endif // _LP64
2794   }
2795 }
2796 
2797 
2798 void LIR_Assembler::align_call(LIR_Code code) {
2799   if (os::is_MP()) {
2800     // make sure that the displacement word of the call ends up word aligned
2801     int offset = __ offset();
2802     switch (code) {
2803       case lir_static_call:
2804       case lir_optvirtual_call:
2805       case lir_dynamic_call:
2806         offset += NativeCall::displacement_offset;
2807         break;
2808       case lir_icvirtual_call:
2809         offset += NativeCall::displacement_offset + NativeMovConstReg::instruction_size;
2810       break;
2811       case lir_virtual_call:  // currently, sparc-specific for niagara
2812       default: ShouldNotReachHere();
2813     }
2814     __ align(BytesPerWord, offset);
2815   }
2816 }
2817 
2818 
2819 void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
2820   assert(!os::is_MP() || (__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0,
2821          "must be aligned");
2822   __ call(AddressLiteral(op->addr(), rtype));
2823   add_call_info(code_offset(), op->info());
2824 }
2825 
2826 
2827 void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
2828   __ ic_call(op->addr());
2829   add_call_info(code_offset(), op->info());
2830   assert(!os::is_MP() ||
2831          (__ offset() - NativeCall::instruction_size + NativeCall::displacement_offset) % BytesPerWord == 0,
2832          "must be aligned");
2833 }
2834 
2835 
2836 /* Currently, vtable-dispatch is only enabled for sparc platforms */
2837 void LIR_Assembler::vtable_call(LIR_OpJavaCall* op) {
2838   ShouldNotReachHere();
2839 }
2840 
2841 
2842 void LIR_Assembler::emit_static_call_stub() {
2843   address call_pc = __ pc();
2844   address stub = __ start_a_stub(call_stub_size());
2845   if (stub == NULL) {
2846     bailout("static call stub overflow");
2847     return;
2848   }
2849 
2850   int start = __ offset();
2851   if (os::is_MP()) {
2852     // make sure that the displacement word of the call ends up word aligned
2853     __ align(BytesPerWord, __ offset() + NativeMovConstReg::instruction_size + NativeCall::displacement_offset);
2854   }
2855   __ relocate(static_stub_Relocation::spec(call_pc, false /* is_aot */));
2856   __ mov_metadata(rbx, (Metadata*)NULL);
2857   // must be set to -1 at code generation time
2858   assert(!os::is_MP() || ((__ offset() + 1) % BytesPerWord) == 0, "must be aligned on MP");
2859   // On 64bit this will die since it will take a movq & jmp, must be only a jmp
2860   __ jump(RuntimeAddress(__ pc()));
2861 
2862   if (UseAOT) {
2863     // Trampoline to aot code
2864     __ relocate(static_stub_Relocation::spec(call_pc, true /* is_aot */));
2865 #ifdef _LP64
2866     __ mov64(rax, CONST64(0));  // address is zapped till fixup time.
2867 #else
2868     __ movl(rax, 0xdeadffff);  // address is zapped till fixup time.
2869 #endif
2870     __ jmp(rax);
2871   }
2872   assert(__ offset() - start <= call_stub_size(), "stub too big");
2873   __ end_a_stub();
2874 }
2875 
2876 
2877 void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
2878   assert(exceptionOop->as_register() == rax, "must match");


3955 void LIR_Assembler::get_thread(LIR_Opr result_reg) {
3956   assert(result_reg->is_register(), "check");
3957 #ifdef _LP64
3958   // __ get_thread(result_reg->as_register_lo());
3959   __ mov(result_reg->as_register(), r15_thread);
3960 #else
3961   __ get_thread(result_reg->as_register());
3962 #endif // _LP64
3963 }
3964 
3965 
3966 void LIR_Assembler::peephole(LIR_List*) {
3967   // do nothing for now
3968 }
3969 
3970 void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp) {
3971   assert(data == dest, "xchg/xadd uses only 2 operands");
3972 
3973   if (data->type() == T_INT) {
3974     if (code == lir_xadd) {
3975       if (os::is_MP()) {
3976         __ lock();
3977       }
3978       __ xaddl(as_Address(src->as_address_ptr()), data->as_register());
3979     } else {
3980       __ xchgl(data->as_register(), as_Address(src->as_address_ptr()));
3981     }
3982   } else if (data->is_oop()) {
3983     assert (code == lir_xchg, "xadd for oops");
3984     Register obj = data->as_register();
3985 #ifdef _LP64
3986     if (UseCompressedOops) {
3987       __ encode_heap_oop(obj);
3988       __ xchgl(obj, as_Address(src->as_address_ptr()));
3989       __ decode_heap_oop(obj);
3990     } else {
3991       __ xchgptr(obj, as_Address(src->as_address_ptr()));
3992     }
3993 #else
3994     __ xchgl(obj, as_Address(src->as_address_ptr()));
3995 #endif
3996   } else if (data->type() == T_LONG) {
3997 #ifdef _LP64
3998     assert(data->as_register_lo() == data->as_register_hi(), "should be a single register");
3999     if (code == lir_xadd) {
4000       if (os::is_MP()) {
4001         __ lock();
4002       }
4003       __ xaddq(as_Address(src->as_address_ptr()), data->as_register_lo());
4004     } else {
4005       __ xchgq(data->as_register_lo(), as_Address(src->as_address_ptr()));
4006     }
4007 #else
4008     ShouldNotReachHere();
4009 #endif
4010   } else {
4011     ShouldNotReachHere();
4012   }
4013 }
4014 
4015 #undef __
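
The atomic_op lowering above maps lir_xadd to a lock-prefixed xadd and lir_xchg to a plain xchg; an x86 xchg with a memory operand is implicitly locked, which is why only the xadd paths carry an explicit prefix. A rough C++ analogy of the two semantics (illustrative only, not HotSpot code):

#include <atomic>
#include <cstdint>

// "lock xaddl [mem], reg": reg receives the old value, memory receives old + reg.
int32_t xadd_semantics(std::atomic<int32_t>* mem, int32_t add) {
  return mem->fetch_add(add);
}

// "xchgl reg, [mem]": implicitly locked on x86; register and memory swap values.
int32_t xchg_semantics(std::atomic<int32_t>* mem, int32_t newval) {
  return mem->exchange(newval);
}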


1890         __ bind(failure);
1891         __ xorptr(dst, dst);
1892         __ jmpb(done);
1893         __ bind(success);
1894         __ movptr(dst, 1);
1895         __ bind(done);
1896       } else {
1897         ShouldNotReachHere();
1898       }
1899 
1900 }
1901 
1902 
1903 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
1904   if (LP64_ONLY(false &&) op->code() == lir_cas_long && VM_Version::supports_cx8()) {
1905     assert(op->cmp_value()->as_register_lo() == rax, "wrong register");
1906     assert(op->cmp_value()->as_register_hi() == rdx, "wrong register");
1907     assert(op->new_value()->as_register_lo() == rbx, "wrong register");
1908     assert(op->new_value()->as_register_hi() == rcx, "wrong register");
1909     Register addr = op->addr()->as_register();

1910     __ lock();

1911     NOT_LP64(__ cmpxchg8(Address(addr, 0)));
1912 
1913   } else if (op->code() == lir_cas_int || op->code() == lir_cas_obj ) {
1914     NOT_LP64(assert(op->addr()->is_single_cpu(), "must be single");)
1915     Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());
1916     Register newval = op->new_value()->as_register();
1917     Register cmpval = op->cmp_value()->as_register();
1918     assert(cmpval == rax, "wrong register");
1919     assert(newval != NULL, "new val must be register");
1920     assert(cmpval != newval, "cmp and new values must be in different registers");
1921     assert(cmpval != addr, "cmp and addr must be in different registers");
1922     assert(newval != addr, "new value and addr must be in different registers");
1923 
1924     if ( op->code() == lir_cas_obj) {
1925 #ifdef _LP64
1926       if (UseCompressedOops) {
1927         __ encode_heap_oop(cmpval);
1928         __ mov(rscratch1, newval);
1929         __ encode_heap_oop(rscratch1);

1930         __ lock();

1931         // cmpval (rax) is implicitly used by this instruction
1932         __ cmpxchgl(rscratch1, Address(addr, 0));
1933       } else
1934 #endif
1935       {

1936         __ lock();

1937         __ cmpxchgptr(newval, Address(addr, 0));
1938       }
1939     } else {
1940       assert(op->code() == lir_cas_int, "lir_cas_int expected");

1941       __ lock();

1942       __ cmpxchgl(newval, Address(addr, 0));
1943     }
1944 #ifdef _LP64
1945   } else if (op->code() == lir_cas_long) {
1946     Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());
1947     Register newval = op->new_value()->as_register_lo();
1948     Register cmpval = op->cmp_value()->as_register_lo();
1949     assert(cmpval == rax, "wrong register");
1950     assert(newval != NULL, "new val must be register");
1951     assert(cmpval != newval, "cmp and new values must be in different registers");
1952     assert(cmpval != addr, "cmp and addr must be in different registers");
1953     assert(newval != addr, "new value and addr must be in different registers");

1954     __ lock();

1955     __ cmpxchgq(newval, Address(addr, 0));
1956 #endif // _LP64
1957   } else {
1958     Unimplemented();
1959   }
1960 }
1961 
1962 void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type) {
1963   Assembler::Condition acond, ncond;
1964   switch (condition) {
1965     case lir_cond_equal:        acond = Assembler::equal;        ncond = Assembler::notEqual;     break;
1966     case lir_cond_notEqual:     acond = Assembler::notEqual;     ncond = Assembler::equal;        break;
1967     case lir_cond_less:         acond = Assembler::less;         ncond = Assembler::greaterEqual; break;
1968     case lir_cond_lessEqual:    acond = Assembler::lessEqual;    ncond = Assembler::greater;      break;
1969     case lir_cond_greaterEqual: acond = Assembler::greaterEqual; ncond = Assembler::less;         break;
1970     case lir_cond_greater:      acond = Assembler::greater;      ncond = Assembler::lessEqual;    break;
1971     case lir_cond_belowEqual:   acond = Assembler::belowEqual;   ncond = Assembler::above;        break;
1972     case lir_cond_aboveEqual:   acond = Assembler::aboveEqual;   ncond = Assembler::below;        break;
1973     default:                    acond = Assembler::equal;        ncond = Assembler::notEqual;
1974                                 ShouldNotReachHere();
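
In the lir_cas_obj path above, when UseCompressedOops is set both the expected and the new reference are narrowed before a 32-bit cmpxchgl, because the field itself holds a compressed oop; the new value is encoded in rscratch1 rather than in place, so the caller's register keeps the uncompressed reference. A conceptual sketch of the narrowing, with an assumed heap base and shift (the real parameters depend on the VM's CompressedOops mode):

#include <cstdint>

// Conceptual model only: a 64-bit heap reference stored as a 32-bit offset
// from an assumed heap base, optionally scaled by an alignment shift.
static uint32_t encode_oop(uint64_t oop, uint64_t heap_base, unsigned shift) {
  if (oop == 0) return 0;                          // NULL stays NULL
  return (uint32_t)((oop - heap_base) >> shift);   // narrow to 32 bits
}

static uint64_t decode_oop(uint32_t narrow, uint64_t heap_base, unsigned shift) {
  if (narrow == 0) return 0;
  return heap_base + ((uint64_t)narrow << shift);  // widen back to a full pointer
}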


2769     Label done;
2770     Register dest = dst->as_register();
2771     __ cmpptr(left->as_register_lo(), right->as_register_lo());
2772     __ movl(dest, -1);
2773     __ jccb(Assembler::less, done);
2774     __ set_byte_if_not_zero(dest);
2775     __ movzbl(dest, dest);
2776     __ bind(done);
2777 #else
2778     __ lcmp2int(left->as_register_hi(),
2779                 left->as_register_lo(),
2780                 right->as_register_hi(),
2781                 right->as_register_lo());
2782     move_regs(left->as_register_hi(), dst->as_register());
2783 #endif // _LP64
2784   }
2785 }
2786 
2787 
2788 void LIR_Assembler::align_call(LIR_Code code) {

2789   // make sure that the displacement word of the call ends up word aligned
2790   int offset = __ offset();
2791   switch (code) {
2792   case lir_static_call:
2793   case lir_optvirtual_call:
2794   case lir_dynamic_call:
2795     offset += NativeCall::displacement_offset;
2796     break;
2797   case lir_icvirtual_call:
2798     offset += NativeCall::displacement_offset + NativeMovConstReg::instruction_size;
2799     break;
2800   case lir_virtual_call:  // currently, sparc-specific for niagara
2801   default: ShouldNotReachHere();
2802   }
2803   __ align(BytesPerWord, offset);

2804 }
2805 
2806 
2807 void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
2808   assert((__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0,
2809          "must be aligned");
2810   __ call(AddressLiteral(op->addr(), rtype));
2811   add_call_info(code_offset(), op->info());
2812 }
2813 
2814 
2815 void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
2816   __ ic_call(op->addr());
2817   add_call_info(code_offset(), op->info());
2818   assert((__ offset() - NativeCall::instruction_size + NativeCall::displacement_offset) % BytesPerWord == 0,

2819          "must be aligned");
2820 }
2821 
2822 
2823 /* Currently, vtable-dispatch is only enabled for sparc platforms */
2824 void LIR_Assembler::vtable_call(LIR_OpJavaCall* op) {
2825   ShouldNotReachHere();
2826 }
2827 
2828 
2829 void LIR_Assembler::emit_static_call_stub() {
2830   address call_pc = __ pc();
2831   address stub = __ start_a_stub(call_stub_size());
2832   if (stub == NULL) {
2833     bailout("static call stub overflow");
2834     return;
2835   }
2836 
2837   int start = __ offset();
2838 
2839   // make sure that the displacement word of the call ends up word aligned
2840   __ align(BytesPerWord, __ offset() + NativeMovConstReg::instruction_size + NativeCall::displacement_offset);

2841   __ relocate(static_stub_Relocation::spec(call_pc, false /* is_aot */));
2842   __ mov_metadata(rbx, (Metadata*)NULL);
2843   // must be set to -1 at code generation time
2844   assert(((__ offset() + 1) % BytesPerWord) == 0, "must be aligned");
2845   // On 64bit this will die since it will take a movq & jmp, must be only a jmp
2846   __ jump(RuntimeAddress(__ pc()));
2847 
2848   if (UseAOT) {
2849     // Trampoline to aot code
2850     __ relocate(static_stub_Relocation::spec(call_pc, true /* is_aot */));
2851 #ifdef _LP64
2852     __ mov64(rax, CONST64(0));  // address is zapped till fixup time.
2853 #else
2854     __ movl(rax, 0xdeadffff);  // address is zapped till fixup time.
2855 #endif
2856     __ jmp(rax);
2857   }
2858   assert(__ offset() - start <= call_stub_size(), "stub too big");
2859   __ end_a_stub();
2860 }
2861 
2862 
2863 void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
2864   assert(exceptionOop->as_register() == rax, "must match");


3941 void LIR_Assembler::get_thread(LIR_Opr result_reg) {
3942   assert(result_reg->is_register(), "check");
3943 #ifdef _LP64
3944   // __ get_thread(result_reg->as_register_lo());
3945   __ mov(result_reg->as_register(), r15_thread);
3946 #else
3947   __ get_thread(result_reg->as_register());
3948 #endif // _LP64
3949 }
3950 
3951 
3952 void LIR_Assembler::peephole(LIR_List*) {
3953   // do nothing for now
3954 }
3955 
3956 void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp) {
3957   assert(data == dest, "xchg/xadd uses only 2 operands");
3958 
3959   if (data->type() == T_INT) {
3960     if (code == lir_xadd) {

3961       __ lock();

3962       __ xaddl(as_Address(src->as_address_ptr()), data->as_register());
3963     } else {
3964       __ xchgl(data->as_register(), as_Address(src->as_address_ptr()));
3965     }
3966   } else if (data->is_oop()) {
3967     assert (code == lir_xchg, "xadd for oops");
3968     Register obj = data->as_register();
3969 #ifdef _LP64
3970     if (UseCompressedOops) {
3971       __ encode_heap_oop(obj);
3972       __ xchgl(obj, as_Address(src->as_address_ptr()));
3973       __ decode_heap_oop(obj);
3974     } else {
3975       __ xchgptr(obj, as_Address(src->as_address_ptr()));
3976     }
3977 #else
3978     __ xchgl(obj, as_Address(src->as_address_ptr()));
3979 #endif
3980   } else if (data->type() == T_LONG) {
3981 #ifdef _LP64
3982     assert(data->as_register_lo() == data->as_register_hi(), "should be a single register");
3983     if (code == lir_xadd) {

3984       __ lock();

3985       __ xaddq(as_Address(src->as_address_ptr()), data->as_register_lo());
3986     } else {
3987       __ xchgq(data->as_register_lo(), as_Address(src->as_address_ptr()));
3988     }
3989 #else
3990     ShouldNotReachHere();
3991 #endif
3992   } else {
3993     ShouldNotReachHere();
3994   }
3995 }
3996 
3997 #undef __