src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp

Old version:

 134   // Used by deoptimization when it is managing result register
 135   // values on its own
 136 
 137   static int rax_offset_in_bytes(void)    { return BytesPerInt * rax_off; }
 138   static int rdx_offset_in_bytes(void)    { return BytesPerInt * rdx_off; }
 139   static int rbx_offset_in_bytes(void)    { return BytesPerInt * rbx_off; }
 140   static int xmm0_offset_in_bytes(void)   { return BytesPerInt * xmm0_off; }
 141   static int return_offset_in_bytes(void) { return BytesPerInt * return_off; }
 142 
 143   // During deoptimization only the result registers need to be restored;
 144   // all the other values have already been extracted.
 145   static void restore_result_registers(MacroAssembler* masm);
 146 };
 147 
 148 OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words, bool save_vectors) {
 149   int off = 0;
 150   int num_xmm_regs = XMMRegisterImpl::number_of_registers;
 151   if (UseAVX < 3) {
 152     num_xmm_regs = num_xmm_regs/2;
 153   }
 154 #if defined(COMPILER2) || INCLUDE_JVMCI
 155   if (save_vectors) {
 156     assert(UseAVX > 0, "Vectors larger than 16 bytes are supported only with AVX");
 157     assert(MaxVectorSize <= 64, "Only vectors up to 64 bytes long are supported");
 158   }
 159 #else
 160   assert(!save_vectors, "vectors are generated only by C2 and JVMCI");
 161 #endif
 162 
 163   // Always make the frame size 16-byte aligned; both vector and non-vector stacks are always allocated this way
 164   int frame_size_in_bytes = align_up(reg_save_size*BytesPerInt, num_xmm_regs);
 165   // OopMap frame size is in compiler stack slots (jints), not bytes or words
 166   int frame_size_in_slots = frame_size_in_bytes / BytesPerInt;
 167   // CodeBlob frame size is in words.
 168   int frame_size_in_words = frame_size_in_bytes / wordSize;
 169   *total_frame_words = frame_size_in_words;
 170 
 171   // Save registers, fpu state, and flags.
 172   // We assume caller has already pushed the return address onto the
 173   // stack, so rsp is 8-byte aligned here.
 174   // We push rbp twice in this sequence because we want the real rbp


 243   // For both AVX and EVEX we will use the legacy FXSAVE area for xmm0..xmm15;
 244   // on EVEX-enabled targets it is included in the XSAVE area
 245   off = xmm0_off;
 246   int delta = xmm1_off - off;
 247   for (int n = 0; n < 16; n++) {
 248     XMMRegister xmm_name = as_XMMRegister(n);
 249     map->set_callee_saved(STACK_OFFSET(off), xmm_name->as_VMReg());
 250     off += delta;
 251   }
 252   if (UseAVX > 2) {
 253     // Obtain xmm16..xmm31 from the XSAVE area on EVEX enabled targets
 254     off = zmm16_off;
 255     delta = zmm17_off - off;
 256     for (int n = 16; n < num_xmm_regs; n++) {
 257       XMMRegister zmm_name = as_XMMRegister(n);
 258       map->set_callee_saved(STACK_OFFSET(off), zmm_name->as_VMReg());
 259       off += delta;
 260     }
 261   }
 262 
 263 #if defined(COMPILER2) || INCLUDE_JVMCI
 264   if (save_vectors) {
 265     off = ymm0_off;
 266     int delta = ymm1_off - off;
 267     for (int n = 0; n < 16; n++) {
 268       XMMRegister ymm_name = as_XMMRegister(n);
 269       map->set_callee_saved(STACK_OFFSET(off), ymm_name->as_VMReg()->next(4));
 270       off += delta;
 271     }
 272   }
 273 #endif // COMPILER2 || INCLUDE_JVMCI
 274 
 275   // %%% These should all be a waste but we'll keep things as they were for now
 276   if (true) {
 277     map->set_callee_saved(STACK_OFFSET( raxH_off ), rax->as_VMReg()->next());
 278     map->set_callee_saved(STACK_OFFSET( rcxH_off ), rcx->as_VMReg()->next());
 279     map->set_callee_saved(STACK_OFFSET( rdxH_off ), rdx->as_VMReg()->next());
 280     map->set_callee_saved(STACK_OFFSET( rbxH_off ), rbx->as_VMReg()->next());
 281     // rbp location is known implicitly by the frame sender code, needs no oopmap
 282     map->set_callee_saved(STACK_OFFSET( rsiH_off ), rsi->as_VMReg()->next());
 283     map->set_callee_saved(STACK_OFFSET( rdiH_off ), rdi->as_VMReg()->next());
 284     map->set_callee_saved(STACK_OFFSET( r8H_off  ), r8->as_VMReg()->next());
 285     map->set_callee_saved(STACK_OFFSET( r9H_off  ), r9->as_VMReg()->next());
 286     map->set_callee_saved(STACK_OFFSET( r10H_off ), r10->as_VMReg()->next());
 287     map->set_callee_saved(STACK_OFFSET( r11H_off ), r11->as_VMReg()->next());
 288     map->set_callee_saved(STACK_OFFSET( r12H_off ), r12->as_VMReg()->next());
 289     map->set_callee_saved(STACK_OFFSET( r13H_off ), r13->as_VMReg()->next());
 290     map->set_callee_saved(STACK_OFFSET( r14H_off ), r14->as_VMReg()->next());
 291     map->set_callee_saved(STACK_OFFSET( r15H_off ), r15->as_VMReg()->next());
 292     // For both AVX and EVEX we will use the legacy FXSAVE area for xmm0..xmm15;
 293     // on EVEX-enabled targets it is included in the XSAVE area


 306         XMMRegister zmm_name = as_XMMRegister(n);
 307         map->set_callee_saved(STACK_OFFSET(off), zmm_name->as_VMReg()->next());
 308         off += delta;
 309       }
 310     }
 311   }
 312 
 313   return map;
 314 }
 315 
 316 void RegisterSaver::restore_live_registers(MacroAssembler* masm, bool restore_vectors) {
 317   int num_xmm_regs = XMMRegisterImpl::number_of_registers;
 318   if (UseAVX < 3) {
 319     num_xmm_regs = num_xmm_regs/2;
 320   }
 321   if (frame::arg_reg_save_area_bytes != 0) {
 322     // Pop arg register save area
 323     __ addptr(rsp, frame::arg_reg_save_area_bytes);
 324   }
 325 
 326 #if defined(COMPILER2) || INCLUDE_JVMCI
 327   if (restore_vectors) {
 328     assert(UseAVX > 0, "Vectors larger than 16 bytes are supported only with AVX");
 329     assert(MaxVectorSize <= 64, "Only vectors up to 64 bytes long are supported");
 330   }
 331 #else
 332   assert(!restore_vectors, "vectors are generated only by C2 and JVMCI");
 333 #endif
 334 
 335   __ vzeroupper();
 336 
 337   // On EVEX-enabled targets everything is handled in pop fpu state
 338   if (restore_vectors) {
 339     // Restore upper half of YMM registers (0..15)
 340     int base_addr = XSAVE_AREA_YMM_BEGIN;
 341     for (int n = 0; n < 16; n++) {
 342       __ vinsertf128_high(as_XMMRegister(n), Address(rsp, base_addr+n*16));
 343     }
 344     if (VM_Version::supports_evex()) {
 345       // Restore upper half of ZMM registers (0..15)
 346       base_addr = XSAVE_AREA_ZMM_BEGIN;


New version:

 134   // Used by deoptimization when it is managing result register
 135   // values on its own
 136 
 137   static int rax_offset_in_bytes(void)    { return BytesPerInt * rax_off; }
 138   static int rdx_offset_in_bytes(void)    { return BytesPerInt * rdx_off; }
 139   static int rbx_offset_in_bytes(void)    { return BytesPerInt * rbx_off; }
 140   static int xmm0_offset_in_bytes(void)   { return BytesPerInt * xmm0_off; }
 141   static int return_offset_in_bytes(void) { return BytesPerInt * return_off; }
 142 
 143   // During deoptimization only the result registers need to be restored;
 144   // all the other values have already been extracted.
 145   static void restore_result_registers(MacroAssembler* masm);
 146 };
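
The byte offsets above exist so the deoptimization blob can restore just the result registers. A minimal sketch of how restore_result_registers presumably uses them (its body lies outside this hunk, so the exact moves are an assumption):

    // Sketch (assumed shape): restore only the result registers.
    void RegisterSaver::restore_result_registers(MacroAssembler* masm) {
      // Floating-point result
      __ movdbl(xmm0, Address(rsp, xmm0_offset_in_bytes()));
      // Integer result in rax, plus rdx for two-word results
      __ movptr(rax, Address(rsp, rax_offset_in_bytes()));
      __ movptr(rdx, Address(rsp, rdx_offset_in_bytes()));
      // Drop the save area, leaving only the return address on the stack
      __ addptr(rsp, return_offset_in_bytes());
    }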
 147 
 148 OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words, bool save_vectors) {
 149   int off = 0;
 150   int num_xmm_regs = XMMRegisterImpl::number_of_registers;
 151   if (UseAVX < 3) {
 152     num_xmm_regs = num_xmm_regs/2;
 153   }
 154 #if COMPILER2_OR_JVMCI
 155   if (save_vectors) {
 156     assert(UseAVX > 0, "Vectors larger than 16 bytes are supported only with AVX");
 157     assert(MaxVectorSize <= 64, "Only vectors up to 64 bytes long are supported");
 158   }
 159 #else
 160   assert(!save_vectors, "vectors are generated only by C2 and JVMCI");
 161 #endif
 162 
 163   // Always make the frame size 16-byte aligned; both vector and non-vector stacks are always allocated this way
 164   int frame_size_in_bytes = align_up(reg_save_size*BytesPerInt, num_xmm_regs);
 165   // OopMap frame size is in compiler stack slots (jints), not bytes or words
 166   int frame_size_in_slots = frame_size_in_bytes / BytesPerInt;
 167   // CodeBlob frame size is in words.
 168   int frame_size_in_words = frame_size_in_bytes / wordSize;
 169   *total_frame_words = frame_size_in_words;
 170 
 171   // Save registers, fpu state, and flags.
 172   // We assume caller has already pushed the return address onto the
 173   // stack, so rsp is 8-byte aligned here.
 174   // We push rbp twice in this sequence because we want the real rbp


 243   // For both AVX and EVEX we will use the legacy FXSAVE area for xmm0..xmm15;
 244   // on EVEX-enabled targets it is included in the XSAVE area
 245   off = xmm0_off;
 246   int delta = xmm1_off - off;
 247   for (int n = 0; n < 16; n++) {
 248     XMMRegister xmm_name = as_XMMRegister(n);
 249     map->set_callee_saved(STACK_OFFSET(off), xmm_name->as_VMReg());
 250     off += delta;
 251   }
 252   if (UseAVX > 2) {
 253     // Obtain xmm16..xmm31 from the XSAVE area on EVEX enabled targets
 254     off = zmm16_off;
 255     delta = zmm17_off - off;
 256     for (int n = 16; n < num_xmm_regs; n++) {
 257       XMMRegister zmm_name = as_XMMRegister(n);
 258       map->set_callee_saved(STACK_OFFSET(off), zmm_name->as_VMReg());
 259       off += delta;
 260     }
 261   }
 262 
 263 #if COMPILER2_OR_JVMCI
 264   if (save_vectors) {
 265     off = ymm0_off;
 266     int delta = ymm1_off - off;
 267     for (int n = 0; n < 16; n++) {
 268       XMMRegister ymm_name = as_XMMRegister(n);
 269       map->set_callee_saved(STACK_OFFSET(off), ymm_name->as_VMReg()->next(4));
 270       off += delta;
 271     }
 272   }
 273 #endif // COMPILER2_OR_JVMCI
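
A note on the VMReg arithmetic in the loop just above, since the save area records registers in 4-byte slots:

    // VMReg slots are 4 bytes wide, so for the ymm loop above:
    //   xmm0->as_VMReg()          -> bits   0..31  of the register
    //   xmm0->as_VMReg()->next(4) -> bits 128..159, i.e. the start of the
    //                                upper ymm half saved at ymm0_off
    // and for the rNH_off entries below:
    //   rax->as_VMReg()->next()   -> bits 32..63 of rax (the high half)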
 274 
 275   // %%% These should all be a waste but we'll keep things as they were for now
 276   if (true) {
 277     map->set_callee_saved(STACK_OFFSET( raxH_off ), rax->as_VMReg()->next());
 278     map->set_callee_saved(STACK_OFFSET( rcxH_off ), rcx->as_VMReg()->next());
 279     map->set_callee_saved(STACK_OFFSET( rdxH_off ), rdx->as_VMReg()->next());
 280     map->set_callee_saved(STACK_OFFSET( rbxH_off ), rbx->as_VMReg()->next());
 281     // rbp location is known implicitly by the frame sender code, needs no oopmap
 282     map->set_callee_saved(STACK_OFFSET( rsiH_off ), rsi->as_VMReg()->next());
 283     map->set_callee_saved(STACK_OFFSET( rdiH_off ), rdi->as_VMReg()->next());
 284     map->set_callee_saved(STACK_OFFSET( r8H_off  ), r8->as_VMReg()->next());
 285     map->set_callee_saved(STACK_OFFSET( r9H_off  ), r9->as_VMReg()->next());
 286     map->set_callee_saved(STACK_OFFSET( r10H_off ), r10->as_VMReg()->next());
 287     map->set_callee_saved(STACK_OFFSET( r11H_off ), r11->as_VMReg()->next());
 288     map->set_callee_saved(STACK_OFFSET( r12H_off ), r12->as_VMReg()->next());
 289     map->set_callee_saved(STACK_OFFSET( r13H_off ), r13->as_VMReg()->next());
 290     map->set_callee_saved(STACK_OFFSET( r14H_off ), r14->as_VMReg()->next());
 291     map->set_callee_saved(STACK_OFFSET( r15H_off ), r15->as_VMReg()->next());
 292     // For both AVX and EVEX we will use the legacy FXSAVE area for xmm0..xmm15;
 293     // on EVEX-enabled targets it is included in the XSAVE area


 306         XMMRegister zmm_name = as_XMMRegister(n);
 307         map->set_callee_saved(STACK_OFFSET(off), zmm_name->as_VMReg()->next());
 308         off += delta;
 309       }
 310     }
 311   }
 312 
 313   return map;
 314 }
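
For concreteness, a worked example of the frame-size arithmetic in save_live_registers, using a hypothetical reg_save_size (the real constant is declared outside this excerpt). Note that the alignment passed to align_up is num_xmm_regs (16, or 32 with AVX-512), not a literal 16:

    // Hypothetical numbers, for illustration only:
    //   reg_save_size = 864 slots and UseAVX < 3, so num_xmm_regs = 16
    //   frame_size_in_bytes = align_up(864 * BytesPerInt, 16)
    //                       = align_up(3456, 16)  = 3456
    //   frame_size_in_slots = 3456 / BytesPerInt  =  864  // jint slots, for the OopMap
    //   frame_size_in_words = 3456 / wordSize     =  432  // 64-bit words, for the CodeBlob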
 315 
 316 void RegisterSaver::restore_live_registers(MacroAssembler* masm, bool restore_vectors) {
 317   int num_xmm_regs = XMMRegisterImpl::number_of_registers;
 318   if (UseAVX < 3) {
 319     num_xmm_regs = num_xmm_regs/2;
 320   }
 321   if (frame::arg_reg_save_area_bytes != 0) {
 322     // Pop arg register save area
 323     __ addptr(rsp, frame::arg_reg_save_area_bytes);
 324   }
 325 
 326 #if COMPILER2_OR_JVMCI
 327   if (restore_vectors) {
 328     assert(UseAVX > 0, "Vectors larger than 16 bytes are supported only with AVX");
 329     assert(MaxVectorSize <= 64, "Only vectors up to 64 bytes long are supported");
 330   }
 331 #else
 332   assert(!restore_vectors, "vectors are generated only by C2 and JVMCI");
 333 #endif
 334 
 335   __ vzeroupper();
 336 
 337   // On EVEX-enabled targets everything is handled in pop fpu state
 338   if (restore_vectors) {
 339     // Restore upper half of YMM registers (0..15)
 340     int base_addr = XSAVE_AREA_YMM_BEGIN;
 341     for (int n = 0; n < 16; n++) {
 342       __ vinsertf128_high(as_XMMRegister(n), Address(rsp, base_addr+n*16));
 343     }
 344     if (VM_Version::supports_evex()) {
 345       // Restore upper half of ZMM registers (0..15)
 346       base_addr = XSAVE_AREA_ZMM_BEGIN;
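
Two details of the restore path are worth noting. First, vzeroupper is issued before the loads to avoid AVX-to-SSE transition penalties on the legacy FXSAVE-area accesses. Second, the loop reloads only the upper 128 bits of each ymm register; the low halves come back with the xmm save area. The matching save side lies outside this excerpt; presumably it mirrors the loop with the inverse instruction, sketched here as an assumption:

    // Assumed save-side counterpart of the restore loop (sketch): spill the
    // upper 128 bits of ymm0..ymm15 into the slots that vinsertf128_high
    // reads back during restore.
    int base_addr = XSAVE_AREA_YMM_BEGIN;
    for (int n = 0; n < 16; n++) {
      __ vextractf128_high(Address(rsp, base_addr + n * 16), as_XMMRegister(n));
    }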

