src/cpu/x86/vm/macroAssembler_x86.cpp
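This webrev excerpt for changeset 8004835 contains a single change, to MacroAssembler::pshufb: the 16-byte alignment test is hoisted into an aligned_adr flag, which is then passed through to Assembler::pshufb. The surrounding xorpd, xorps, vaddsd, and vaddss wrappers appear only as unchanged context.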
--- old/src/cpu/x86/vm/macroAssembler_x86.cpp
+++ new/src/cpu/x86/vm/macroAssembler_x86.cpp
@@ -3068,46 +3068,47 @@
     Assembler::xorpd(dst, as_Address(src));
   } else {
     lea(rscratch1, src);
     Assembler::xorpd(dst, Address(rscratch1, 0));
   }
 }
 
 void MacroAssembler::xorps(XMMRegister dst, AddressLiteral src) {
   // Used in sign-bit flipping with aligned address.
   assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
   if (reachable(src)) {
     Assembler::xorps(dst, as_Address(src));
   } else {
     lea(rscratch1, src);
     Assembler::xorps(dst, Address(rscratch1, 0));
   }
 }
 
 void MacroAssembler::pshufb(XMMRegister dst, AddressLiteral src) {
   // Used in sign-bit flipping with aligned address.
-  assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
+  bool aligned_adr = (((intptr_t)src.target() & 15) == 0);
+  assert((UseAVX > 0) || aligned_adr, "SSE mode requires address alignment 16 bytes");
   if (reachable(src)) {
-    Assembler::pshufb(dst, as_Address(src));
+    Assembler::pshufb(dst, as_Address(src), aligned_adr);
   } else {
     lea(rscratch1, src);
-    Assembler::pshufb(dst, Address(rscratch1, 0));
+    Assembler::pshufb(dst, Address(rscratch1, 0), aligned_adr);
   }
 }
 
 // AVX 3-operands instructions
 
 void MacroAssembler::vaddsd(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
   if (reachable(src)) {
     vaddsd(dst, nds, as_Address(src));
   } else {
     lea(rscratch1, src);
     vaddsd(dst, nds, Address(rscratch1, 0));
   }
 }
 
 void MacroAssembler::vaddss(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
   if (reachable(src)) {
     vaddss(dst, nds, as_Address(src));
   } else {
     lea(rscratch1, src);
     vaddss(dst, nds, Address(rscratch1, 0));
