< prev index next >

src/cpu/x86/vm/sharedRuntime_x86_32.cpp

Print this page




 175   }
 176 
 177   int off = st0_off;
 178   int delta = st1_off - off;
 179 
 180   // Save the FPU registers in de-opt-able form
 181   for (int n = 0; n < FloatRegisterImpl::number_of_registers; n++) {
 182     __ fstp_d(Address(rsp, off*wordSize));
 183     off += delta;
 184   }
 185 
 186   off = xmm0_off;
 187   delta = xmm1_off - off;
 188   if(UseSSE == 1) {           // Save the XMM state
 189     for (int n = 0; n < num_xmm_regs; n++) {
 190       __ movflt(Address(rsp, off*wordSize), as_XMMRegister(n));
 191       off += delta;
 192     }
 193   } else if(UseSSE >= 2) {
 194     // Save whole 128bit (16 bytes) XMM registers
 195     if (VM_Version::supports_avx512novl()) {
 196       for (int n = 0; n < num_xmm_regs; n++) {
 197         __ vextractf32x4h(Address(rsp, off*wordSize), as_XMMRegister(n), 0);
 198         off += delta;
 199       }
 200     } else {
 201       for (int n = 0; n < num_xmm_regs; n++) {
 202         __ movdqu(Address(rsp, off*wordSize), as_XMMRegister(n));
 203         off += delta;
 204       }
 205     }
 206   }
 207 
 208   if (vect_words > 0) {
 209     assert(vect_words*wordSize == 128, "");
 210     __ subptr(rsp, 128); // Save upper half of YMM registes
 211     off = 0;
 212     for (int n = 0; n < num_xmm_regs; n++) {
 213       __ vextractf128h(Address(rsp, off++*16), as_XMMRegister(n));
 214     }
 215     if (UseAVX > 2) {
 216       __ subptr(rsp, 256); // Save upper half of ZMM registes
 217       off = 0;
 218       for (int n = 0; n < num_xmm_regs; n++) {
 219         __ vextractf64x4h(Address(rsp, off++*32), as_XMMRegister(n));
 220       }
 221     }
 222   }
 223 
 224   // Set an oopmap for the call site.  This oopmap will map all
 225   // oop-registers and debug-info registers as callee-saved.  This
 226   // will allow deoptimization at this safepoint to find all possible
 227   // debug-info recordings, as well as let GC find all oops.
 228 
 229   OopMapSet *oop_maps = new OopMapSet();
 230   OopMap* map =  new OopMap( frame_words, 0 );
 231 
 232 #define STACK_OFFSET(x) VMRegImpl::stack2reg((x) + additional_frame_words)
 233 #define NEXTREG(x) (x)->as_VMReg()->next()
 234 
 235   map->set_callee_saved(STACK_OFFSET(rax_off), rax->as_VMReg());
 236   map->set_callee_saved(STACK_OFFSET(rcx_off), rcx->as_VMReg());
 237   map->set_callee_saved(STACK_OFFSET(rdx_off), rdx->as_VMReg());
 238   map->set_callee_saved(STACK_OFFSET(rbx_off), rbx->as_VMReg());
 239   // rbp, location is known implicitly, no oopMap


 268   int additional_frame_bytes = 0;
 269 #ifdef COMPILER2
 270   if (restore_vectors) {
 271     assert(UseAVX > 0, "512bit vectors are supported only with EVEX");
 272     assert(MaxVectorSize == 64, "only 512bit vectors are supported now");
 273     additional_frame_bytes = 128;
 274   }
 275 #else
 276   assert(!restore_vectors, "vectors are generated only by C2");
 277 #endif
 278   int off = xmm0_off;
 279   int delta = xmm1_off - off;
 280 
 281   if (UseSSE == 1) {
 282     assert(additional_frame_bytes == 0, "");
 283     for (int n = 0; n < num_xmm_regs; n++) {
 284       __ movflt(as_XMMRegister(n), Address(rsp, off*wordSize));
 285       off += delta;
 286     }
 287   } else if (UseSSE >= 2) {
 288     if (VM_Version::supports_avx512novl()) {
 289       for (int n = 0; n < num_xmm_regs; n++) {
 290         __ vinsertf32x4h(as_XMMRegister(n), Address(rsp, off*wordSize+additional_frame_bytes), 0);
 291         off += delta;
 292       }
 293     } else {
 294       for (int n = 0; n < num_xmm_regs; n++) {
 295         __ movdqu(as_XMMRegister(n), Address(rsp, off*wordSize+additional_frame_bytes));
 296         off += delta;
 297       }
 298     }
 299   }
 300   if (restore_vectors) {

 301     if (UseAVX > 2) {
 302       off = 0;
 303       for (int n = 0; n < num_xmm_regs; n++) {
 304         __ vinsertf64x4h(as_XMMRegister(n), Address(rsp, off++*32));
 305       }
 306       __ addptr(rsp, additional_frame_bytes*2); // Save upper half of ZMM registes
 307     }
 308     // Restore upper half of YMM registers.
 309     assert(additional_frame_bytes == 128, "");
 310     off = 0;
 311     for (int n = 0; n < num_xmm_regs; n++) {
 312       __ vinsertf128h(as_XMMRegister(n), Address(rsp, off++*16));
 313     }
 314     __ addptr(rsp, additional_frame_bytes); // Save upper half of YMM registes
 315   }
 316   __ pop_FPU_state();
 317   __ addptr(rsp, FPU_regs_live*wordSize); // Pop FPU registers
 318 
 319   __ popf();
 320   __ popa();
 321   // Get the rbp, described implicitly by the frame sender code (no oopMap)
 322   __ pop(rbp);
 323 
 324 }
 325 
 326 void RegisterSaver::restore_result_registers(MacroAssembler* masm) {
 327 
 328   // Just restore result register. Only used by deoptimization. By
 329   // now any callee save register that needs to be restore to a c2
 330   // caller of the deoptee has been extracted into the vframeArray
 331   // and will be stuffed into the c2i adapter we create for later
 332   // restoration so only result registers need to be restored here.




 175   }
 176 
 177   int off = st0_off;
 178   int delta = st1_off - off;
 179 
 180   // Save the FPU registers in de-opt-able form
 181   for (int n = 0; n < FloatRegisterImpl::number_of_registers; n++) {
 182     __ fstp_d(Address(rsp, off*wordSize));
 183     off += delta;
 184   }
 185 
 186   off = xmm0_off;
 187   delta = xmm1_off - off;
 188   if(UseSSE == 1) {           // Save the XMM state
 189     for (int n = 0; n < num_xmm_regs; n++) {
 190       __ movflt(Address(rsp, off*wordSize), as_XMMRegister(n));
 191       off += delta;
 192     }
 193   } else if(UseSSE >= 2) {
 194   // Save whole 128bit (16 bytes) XMM registers






 195     for (int n = 0; n < num_xmm_regs; n++) {
 196       __ movdqu(Address(rsp, off*wordSize), as_XMMRegister(n));
 197       off += delta;
 198     }
 199   }

 200 
 201   if (vect_words > 0) {
 202     assert(vect_words*wordSize == 128, "");
 203     __ subptr(rsp, 128); // Save upper half of YMM registes

 204     for (int n = 0; n < num_xmm_regs; n++) {
 205       __ vextractf128h(Address(rsp, n*16), as_XMMRegister(n));
 206     }
 207     if (UseAVX > 2) {
 208       __ subptr(rsp, 256); // Save upper half of ZMM registes

 209       for (int n = 0; n < num_xmm_regs; n++) {
 210         __ vextractf64x4h(Address(rsp, n*32), as_XMMRegister(n), 1);
 211       }
 212     }
 213   }
 214 
 215   // Set an oopmap for the call site.  This oopmap will map all
 216   // oop-registers and debug-info registers as callee-saved.  This
 217   // will allow deoptimization at this safepoint to find all possible
 218   // debug-info recordings, as well as let GC find all oops.
 219 
 220   OopMapSet *oop_maps = new OopMapSet();
 221   OopMap* map =  new OopMap( frame_words, 0 );
 222 
 223 #define STACK_OFFSET(x) VMRegImpl::stack2reg((x) + additional_frame_words)
 224 #define NEXTREG(x) (x)->as_VMReg()->next()
 225 
 226   map->set_callee_saved(STACK_OFFSET(rax_off), rax->as_VMReg());
 227   map->set_callee_saved(STACK_OFFSET(rcx_off), rcx->as_VMReg());
 228   map->set_callee_saved(STACK_OFFSET(rdx_off), rdx->as_VMReg());
 229   map->set_callee_saved(STACK_OFFSET(rbx_off), rbx->as_VMReg());
 230   // rbp, location is known implicitly, no oopMap


 259   int additional_frame_bytes = 0;
 260 #ifdef COMPILER2
 261   if (restore_vectors) {
 262     assert(UseAVX > 0, "512bit vectors are supported only with EVEX");
 263     assert(MaxVectorSize == 64, "only 512bit vectors are supported now");
 264     additional_frame_bytes = 128;
 265   }
 266 #else
 267   assert(!restore_vectors, "vectors are generated only by C2");
 268 #endif
 269   int off = xmm0_off;
 270   int delta = xmm1_off - off;
 271 
 272   if (UseSSE == 1) {
 273     assert(additional_frame_bytes == 0, "");
 274     for (int n = 0; n < num_xmm_regs; n++) {
 275       __ movflt(as_XMMRegister(n), Address(rsp, off*wordSize));
 276       off += delta;
 277     }
 278   } else if (UseSSE >= 2) {






 279     for (int n = 0; n < num_xmm_regs; n++) {
 280       __ movdqu(as_XMMRegister(n), Address(rsp, off*wordSize+additional_frame_bytes));
 281       off += delta;
 282     }
 283   }

 284   if (restore_vectors) {
 285     assert(additional_frame_bytes == 128, "");
 286     if (UseAVX > 2) {
 287       // Restore upper half of ZMM registers.
 288       for (int n = 0; n < num_xmm_regs; n++) {
 289         __ vinsertf64x4h(as_XMMRegister(n), Address(rsp, n*32), 1);
 290       }
 291       __ addptr(rsp, additional_frame_bytes*2); // Save upper half of ZMM registes
 292     }
 293     // Restore upper half of YMM registers.


 294     for (int n = 0; n < num_xmm_regs; n++) {
 295       __ vinsertf128h(as_XMMRegister(n), Address(rsp, n*16));
 296     }
 297     __ addptr(rsp, additional_frame_bytes); // Save upper half of YMM registes
 298   }
 299   __ pop_FPU_state();
 300   __ addptr(rsp, FPU_regs_live*wordSize); // Pop FPU registers
 301 
 302   __ popf();
 303   __ popa();
 304   // Get the rbp, described implicitly by the frame sender code (no oopMap)
 305   __ pop(rbp);
 306 
 307 }
 308 
 309 void RegisterSaver::restore_result_registers(MacroAssembler* masm) {
 310 
 311   // Just restore result register. Only used by deoptimization. By
 312   // now any callee save register that needs to be restore to a c2
 313   // caller of the deoptee has been extracted into the vframeArray
 314   // and will be stuffed into the c2i adapter we create for later
 315   // restoration so only result registers need to be restored here.


< prev index next >