< prev index next >

src/hotspot/cpu/x86/macroAssembler_x86.cpp

Print this page
rev 50307 : [mq]: cont


 976 
 977 void MacroAssembler::addpd(XMMRegister dst, AddressLiteral src) {
 978   if (reachable(src)) {
 979     Assembler::addpd(dst, as_Address(src));
 980   } else {
 981     lea(rscratch1, src);
 982     Assembler::addpd(dst, Address(rscratch1, 0));
 983   }
 984 }
 985 
// Align the next emitted instruction to 'modulus' bytes, measured from
// the current code-buffer offset. Delegates to align(modulus, target).
void MacroAssembler::align(int modulus) {
  align(modulus, offset());
}
 989 
 990 void MacroAssembler::align(int modulus, int target) {
 991   if (target % modulus != 0) {
 992     nop(modulus - (target % modulus));
 993   }
 994 }
 995 




















 996 void MacroAssembler::andpd(XMMRegister dst, AddressLiteral src) {
 997   // Used in sign-masking with aligned address.
 998   assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
 999   if (reachable(src)) {
1000     Assembler::andpd(dst, as_Address(src));
1001   } else {
1002     lea(rscratch1, src);
1003     Assembler::andpd(dst, Address(rscratch1, 0));
1004   }
1005 }
1006 
1007 void MacroAssembler::andps(XMMRegister dst, AddressLiteral src) {
1008   // Used in sign-masking with aligned address.
1009   assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
1010   if (reachable(src)) {
1011     Assembler::andps(dst, as_Address(src));
1012   } else {
1013     lea(rscratch1, src);
1014     Assembler::andps(dst, Address(rscratch1, 0));
1015   }


3669 }
3670 
#ifdef _LP64
// Byte offset of the XSTATE_BV (state-component bitmap) field: the XSAVE
// header starts at offset 512 (0x200) of an XSAVE area, and XSTATE_BV is
// its first quadword. 64-bit only.
#define XSTATE_BV 0x200
#endif
3674 
// Restore the full CPU state saved by push_CPU_state(), in reverse order:
// the FPU/SSE save area sits on top of the stack, so it is popped first,
// then the integer registers and flags.
void MacroAssembler::pop_CPU_state() {
  pop_FPU_state();
  pop_IU_state();
}
3679 
// Restore the floating-point state from the save area at the top of the
// stack and release the area. 32-bit uses frstor (paired with fnsave in
// push_FPU_state); 64-bit uses fxrstor (paired with fxsave, which also
// covers the XMM registers).
void MacroAssembler::pop_FPU_state() {
#ifndef _LP64
  frstor(Address(rsp, 0));
#else
  fxrstor(Address(rsp, 0));
#endif
  addptr(rsp, FPUStateSizeInWords * wordSize);
}
3688 











// Restore the integer registers and flags saved by push_IU_state().
void MacroAssembler::pop_IU_state() {
  popa();
  // 64-bit: drop an extra 8-byte slot before popf — presumably mirrors a
  // padding push in push_IU_state that keeps the stack 16-byte aligned
  // (see the warning on push_CPU_state); confirm against push_IU_state.
  LP64_ONLY(addq(rsp, 8));
  popf();
}
3694 
// Save Integer and Float state
// Warning: Stack must be 16 byte aligned (64bit)
// Saves integer registers/flags first, then the FPU/SSE state, so that
// pop_CPU_state() can restore in the opposite order.
void MacroAssembler::push_CPU_state() {
  push_IU_state();
  push_FPU_state();
}
3701 
3702 void MacroAssembler::push_FPU_state() {
3703   subptr(rsp, FPUStateSizeInWords * wordSize);
3704 #ifndef _LP64
3705   fnsave(Address(rsp, 0));
3706   fwait();
3707 #else
3708   fxsave(Address(rsp, 0));




 976 
 977 void MacroAssembler::addpd(XMMRegister dst, AddressLiteral src) {
 978   if (reachable(src)) {
 979     Assembler::addpd(dst, as_Address(src));
 980   } else {
 981     lea(rscratch1, src);
 982     Assembler::addpd(dst, Address(rscratch1, 0));
 983   }
 984 }
 985 
// Align the next emitted instruction to 'modulus' bytes, measured from
// the current code-buffer offset. Delegates to align(modulus, target).
void MacroAssembler::align(int modulus) {
  align(modulus, offset());
}
 989 
 990 void MacroAssembler::align(int modulus, int target) {
 991   if (target % modulus != 0) {
 992     nop(modulus - (target % modulus));
 993   }
 994 }
 995 
// Push a single-precision float from XMM register r onto the stack,
// reserving one machine word for it.
void MacroAssembler::push_f(XMMRegister r) {
  subptr(rsp, wordSize);
  movflt(Address(rsp, 0), r);
}
1000 
// Pop a single-precision float from the stack into XMM register r,
// releasing the one-word slot reserved by push_f().
void MacroAssembler::pop_f(XMMRegister r) {
  movflt(r, Address(rsp, 0));
  addptr(rsp, wordSize);
}
1005 
// Push a double-precision float from XMM register r onto the stack.
// Reserves two machine words; the double is stored at the top of stack.
void MacroAssembler::push_d(XMMRegister r) {
  subptr(rsp, 2 * wordSize);
  movdbl(Address(rsp, 0), r);
}
1010 
1011 void MacroAssembler::pop_d(XMMRegister r) {
1012   movdbl(r, Address(rsp, 0));
1013   addptr(rsp, 2 * Interpreter::stackElementSize);
1014 }
1015 
1016 void MacroAssembler::andpd(XMMRegister dst, AddressLiteral src) {
1017   // Used in sign-masking with aligned address.
1018   assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
1019   if (reachable(src)) {
1020     Assembler::andpd(dst, as_Address(src));
1021   } else {
1022     lea(rscratch1, src);
1023     Assembler::andpd(dst, Address(rscratch1, 0));
1024   }
1025 }
1026 
1027 void MacroAssembler::andps(XMMRegister dst, AddressLiteral src) {
1028   // Used in sign-masking with aligned address.
1029   assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
1030   if (reachable(src)) {
1031     Assembler::andps(dst, as_Address(src));
1032   } else {
1033     lea(rscratch1, src);
1034     Assembler::andps(dst, Address(rscratch1, 0));
1035   }


3689 }
3690 
#ifdef _LP64
// Byte offset of the XSTATE_BV (state-component bitmap) field: the XSAVE
// header starts at offset 512 (0x200) of an XSAVE area, and XSTATE_BV is
// its first quadword. 64-bit only.
#define XSTATE_BV 0x200
#endif
3694 
// Restore the full CPU state saved by push_CPU_state(), in reverse order:
// the FPU/SSE save area sits on top of the stack, so it is popped first,
// then the integer registers and flags.
void MacroAssembler::pop_CPU_state() {
  pop_FPU_state();
  pop_IU_state();
}
3699 
// Restore the floating-point state from the save area at the top of the
// stack and release the area. 32-bit uses frstor (paired with fnsave in
// push_FPU_state); 64-bit uses fxrstor (paired with fxsave, which also
// covers the XMM registers).
void MacroAssembler::pop_FPU_state() {
#ifndef _LP64
  frstor(Address(rsp, 0));
#else
  fxrstor(Address(rsp, 0));
#endif
  addptr(rsp, FPUStateSizeInWords * wordSize);
}
3708 
#ifdef ASSERT
// Debug-only guard: halt the VM with message 'name' if the current thread
// is running inside a continuation. Loads the thread's continuation field
// into 'cont' (clobbering it) and stops unless that value is null.
void MacroAssembler::stop_if_in_cont(Register cont, const char* name) {
  Label no_cont;
  movptr(cont, Address(r15_thread, in_bytes(JavaThread::continuation_offset())));
  // Test the full register width: the value was loaded with movptr, and
  // testl would inspect only the low 32 bits — a non-null 64-bit pointer
  // whose low word happens to be zero would be misclassified as null.
  testptr(cont, cont);
  jcc(Assembler::zero, no_cont);
  stop(name);
  bind(no_cont);
}
#endif
3719 
// Restore the integer registers and flags saved by push_IU_state().
void MacroAssembler::pop_IU_state() {
  popa();
  // 64-bit: drop an extra 8-byte slot before popf — presumably mirrors a
  // padding push in push_IU_state that keeps the stack 16-byte aligned
  // (see the warning on push_CPU_state); confirm against push_IU_state.
  LP64_ONLY(addq(rsp, 8));
  popf();
}
3725 
// Save Integer and Float state
// Warning: Stack must be 16 byte aligned (64bit)
// Saves integer registers/flags first, then the FPU/SSE state, so that
// pop_CPU_state() can restore in the opposite order.
void MacroAssembler::push_CPU_state() {
  push_IU_state();
  push_FPU_state();
}
3732 
3733 void MacroAssembler::push_FPU_state() {
3734   subptr(rsp, FPUStateSizeInWords * wordSize);
3735 #ifndef _LP64
3736   fnsave(Address(rsp, 0));
3737   fwait();
3738 #else
3739   fxsave(Address(rsp, 0));


< prev index next >