src/cpu/x86/vm/sharedRuntime_x86_64.cpp

rev 10354 : imported patch vextrinscleanup2

Old:
 162   // OopMap frame size is in compiler stack slots (jint's) not bytes or words
 163   int frame_size_in_slots = frame_size_in_bytes / BytesPerInt;
 164   // CodeBlob frame size is in words.
 165   int frame_size_in_words = frame_size_in_bytes / wordSize;
 166   *total_frame_words = frame_size_in_words;
 167 
 168   // Save registers, fpu state, and flags.
 169   // We assume caller has already pushed the return address onto the
 170   // stack, so rsp is 8-byte aligned here.
 171   // We push rbp twice in this sequence because we want the real rbp
 172   // to be under the return address, like a normal enter.
 173 
 174   __ enter();          // rsp becomes 16-byte aligned here
 175   __ push_CPU_state(); // Push a multiple of 16 bytes
 176 
 177   // push_CPU_state handles this on EVEX-enabled targets
 178   if (save_vectors) {
 179     // Save upper half of YMM registers(0..15)
 180     int base_addr = XSAVE_AREA_YMM_BEGIN;
 181     for (int n = 0; n < 16; n++) {
 182       __ vextractf128h(Address(rsp, base_addr+n*16), as_XMMRegister(n));
 183     }
 184     if (VM_Version::supports_evex()) {
 185       // Save upper half of ZMM registers(0..15)
 186       base_addr = XSAVE_AREA_ZMM_BEGIN;
 187       for (int n = 0; n < 16; n++) {
 188         __ vextractf64x4h(Address(rsp, base_addr+n*32), as_XMMRegister(n), 1);
 189       }
 190       // Save full ZMM registers(16..num_xmm_regs)
 191       base_addr = XSAVE_AREA_UPPERBANK;
 192       off = 0;
 193       int vector_len = Assembler::AVX_512bit;
 194       for (int n = 16; n < num_xmm_regs; n++) {
 195         __ evmovdqul(Address(rsp, base_addr+(off++*64)), as_XMMRegister(n), vector_len);
 196       }
 197     }
 198   } else {
 199     if (VM_Version::supports_evex()) {
 200       // Save upper bank of ZMM registers(16..31) for double/float usage
 201       int base_addr = XSAVE_AREA_UPPERBANK;
 202       off = 0;
 203       for (int n = 16; n < num_xmm_regs; n++) {
 204         __ movsd(Address(rsp, base_addr+(off++*64)), as_XMMRegister(n));
 205       }
 206     }
 207   }
 208   if (frame::arg_reg_save_area_bytes != 0) {
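
A quick worked example of the frame-size bookkeeping at the top of the hunk above, assuming the usual x86_64 values (BytesPerInt == 4, wordSize == 8) and a made-up frame size; a minimal, self-contained sketch:

#include <cstdio>

int main() {
  const int BytesPerInt = 4;            // an OopMap/compiler stack slot is one jint
  const int wordSize    = 8;            // a machine word on x86_64
  const int frame_size_in_bytes = 2048; // hypothetical frame size, for illustration only

  int frame_size_in_slots = frame_size_in_bytes / BytesPerInt; // 512 slots
  int frame_size_in_words = frame_size_in_bytes / wordSize;    // 256 words
  std::printf("%d bytes = %d slots = %d words\n",
              frame_size_in_bytes, frame_size_in_slots, frame_size_in_words);
  return 0;
}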

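The base offsets used by the save loops come from the XSAVE area layout; the XSAVE_AREA_* constants are defined earlier in this file and are not shown in this hunk. A minimal sketch of the address arithmetic, with illustrative values (576/1152/1664 follow the standard XSAVE layout, but treat them as assumptions here), assuming num_xmm_regs == 32:

#include <cstdio>

int main() {
  // Illustrative stand-ins for the real XSAVE_AREA_* constants.
  const int XSAVE_AREA_YMM_BEGIN = 576;
  const int XSAVE_AREA_ZMM_BEGIN = 1152;
  const int XSAVE_AREA_UPPERBANK = 1664;

  for (int n = 0; n < 16; n++) {   // upper 128 bits of ymm0..ymm15, 16 bytes apart
    std::printf("ymm%-2d high -> [rsp + %d]\n", n, XSAVE_AREA_YMM_BEGIN + n * 16);
  }
  for (int n = 0; n < 16; n++) {   // upper 256 bits of zmm0..zmm15, 32 bytes apart
    std::printf("zmm%-2d high -> [rsp + %d]\n", n, XSAVE_AREA_ZMM_BEGIN + n * 32);
  }
  for (int n = 16; n < 32; n++) {  // full 512 bits of zmm16..zmm31, 64 bytes apart
    std::printf("zmm%-2d full -> [rsp + %d]\n", n, XSAVE_AREA_UPPERBANK + (n - 16) * 64);
  }
  return 0;
}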

 316   }
 317   if (frame::arg_reg_save_area_bytes != 0) {
 318     // Pop arg register save area
 319     __ addptr(rsp, frame::arg_reg_save_area_bytes);
 320   }
 321 
 322 #if defined(COMPILER2) || INCLUDE_JVMCI
 323   if (restore_vectors) {
 324     assert(UseAVX > 0, "up to 512bit vectors are supported with EVEX");
 325     assert(MaxVectorSize <= 64, "up to 512bit vectors are supported now");
 326   }
 327 #else
 328   assert(!restore_vectors, "vectors are generated only by C2");
 329 #endif
 330 
 331   // On EVEX-enabled targets everything is handled in pop fpu state
 332   if (restore_vectors) {
 333     // Restore upper half of YMM registers (0..15)
 334     int base_addr = XSAVE_AREA_YMM_BEGIN;
 335     for (int n = 0; n < 16; n++) {
 336       __ vinsertf128h(as_XMMRegister(n), Address(rsp,  base_addr+n*16));
 337     }
 338     if (VM_Version::supports_evex()) {
 339       // Restore upper half of ZMM registers (0..15)
 340       base_addr = XSAVE_AREA_ZMM_BEGIN;
 341       for (int n = 0; n < 16; n++) {
 342         __ vinsertf64x4h(as_XMMRegister(n), Address(rsp, base_addr+n*32), 1);
 343       }
 344       // Restore full ZMM registers(16..num_xmm_regs)
 345       base_addr = XSAVE_AREA_UPPERBANK;
 346       int vector_len = Assembler::AVX_512bit;
 347       int off = 0;
 348       for (int n = 16; n < num_xmm_regs; n++) {
 349         __ evmovdqul(as_XMMRegister(n), Address(rsp, base_addr+(off++*64)), vector_len);
 350       }
 351     }
 352   } else {
 353     if (VM_Version::supports_evex()) {
 354       // Restore upper bank of ZMM registers(16..31) for double/float usage
 355       int base_addr = XSAVE_AREA_UPPERBANK;
 356       int off = 0;
 357       for (int n = 16; n < num_xmm_regs; n++) {
 358         __ movsd(as_XMMRegister(n), Address(rsp, base_addr+(off++*64)));
 359       }
 360     }
 361   }
 362 
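
One detail worth noting in both the save and restore paths: the else branches touch only the low 64 bits of zmm16..zmm31 with movsd, since without vectors in flight those registers can hold at most a scalar float/double. Both branches still step through the save area at a 64-byte stride (off++*64), so only the bytes actually written differ. A back-of-the-envelope comparison, assuming num_xmm_regs == 32:

#include <cstdio>

int main() {
  const int regs         = 16;  // zmm16..zmm31, assuming num_xmm_regs == 32
  const int scalar_bytes = 8;   // movsd writes the low 64 bits only
  const int vector_bytes = 64;  // evmovdqul with AVX_512bit writes the full register
  std::printf("scalar spill writes: %4d bytes\n", regs * scalar_bytes);  // 128
  std::printf("vector spill writes: %4d bytes\n", regs * vector_bytes);  // 1024
  return 0;
}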

New:

 162   // OopMap frame size is in compiler stack slots (jint's) not bytes or words
 163   int frame_size_in_slots = frame_size_in_bytes / BytesPerInt;
 164   // CodeBlob frame size is in words.
 165   int frame_size_in_words = frame_size_in_bytes / wordSize;
 166   *total_frame_words = frame_size_in_words;
 167 
 168   // Save registers, fpu state, and flags.
 169   // We assume caller has already pushed the return address onto the
 170   // stack, so rsp is 8-byte aligned here.
 171   // We push rbp twice in this sequence because we want the real rbp
 172   // to be under the return address, like a normal enter.
 173 
 174   __ enter();          // rsp becomes 16-byte aligned here
 175   __ push_CPU_state(); // Push a multiple of 16 bytes
 176 
 177   // push_CPU_state handles this on EVEX-enabled targets
 178   if (save_vectors) {
 179     // Save upper half of YMM registers(0..15)
 180     int base_addr = XSAVE_AREA_YMM_BEGIN;
 181     for (int n = 0; n < 16; n++) {
 182       __ vextractf128(Address(rsp, base_addr+n*16), as_XMMRegister(n), 1);
 183     }
 184     if (VM_Version::supports_evex()) {
 185       // Save upper half of ZMM registers(0..15)
 186       base_addr = XSAVE_AREA_ZMM_BEGIN;
 187       for (int n = 0; n < 16; n++) {
 188         __ vextractf64x4(Address(rsp, base_addr+n*32), as_XMMRegister(n), 1);
 189       }
 190       // Save full ZMM registers(16..num_xmm_regs)
 191       base_addr = XSAVE_AREA_UPPERBANK;
 192       off = 0;
 193       int vector_len = Assembler::AVX_512bit;
 194       for (int n = 16; n < num_xmm_regs; n++) {
 195         __ evmovdqul(Address(rsp, base_addr+(off++*64)), as_XMMRegister(n), vector_len);
 196       }
 197     }
 198   } else {
 199     if (VM_Version::supports_evex()) {
 200       // Save upper bank of ZMM registers(16..31) for double/float usage
 201       int base_addr = XSAVE_AREA_UPPERBANK;
 202       off = 0;
 203       for (int n = 16; n < num_xmm_regs; n++) {
 204         __ movsd(Address(rsp, base_addr+(off++*64)), as_XMMRegister(n));
 205       }
 206     }
 207   }
 208   if (frame::arg_reg_save_area_bytes != 0) {


 316   }
 317   if (frame::arg_reg_save_area_bytes != 0) {
 318     // Pop arg register save area
 319     __ addptr(rsp, frame::arg_reg_save_area_bytes);
 320   }
 321 
 322 #if defined(COMPILER2) || INCLUDE_JVMCI
 323   if (restore_vectors) {
 324     assert(UseAVX > 0, "up to 512bit vectors are supported with EVEX");
 325     assert(MaxVectorSize <= 64, "up to 512bit vectors are supported now");
 326   }
 327 #else
 328   assert(!restore_vectors, "vectors are generated only by C2");
 329 #endif
 330 
 331   // On EVEX-enabled targets everything is handled in pop fpu state
 332   if (restore_vectors) {
 333     // Restore upper half of YMM registers (0..15)
 334     int base_addr = XSAVE_AREA_YMM_BEGIN;
 335     for (int n = 0; n < 16; n++) {
 336       __ vinsertf128(as_XMMRegister(n), as_XMMRegister(n), Address(rsp, base_addr+n*16), 1);
 337     }
 338     if (VM_Version::supports_evex()) {
 339       // Restore upper half of ZMM registers (0..15)
 340       base_addr = XSAVE_AREA_ZMM_BEGIN;
 341       for (int n = 0; n < 16; n++) {
 342         __ vinsertf64x4(as_XMMRegister(n), as_XMMRegister(n), Address(rsp, base_addr+n*32), 1);
 343       }
 344       // Restore full ZMM registers(16..num_xmm_regs)
 345       base_addr = XSAVE_AREA_UPPERBANK;
 346       int vector_len = Assembler::AVX_512bit;
 347       int off = 0;
 348       for (int n = 16; n < num_xmm_regs; n++) {
 349         __ evmovdqul(as_XMMRegister(n), Address(rsp, base_addr+(off++*64)), vector_len);
 350       }
 351     }
 352   } else {
 353     if (VM_Version::supports_evex()) {
 354       // Restore upper bank of ZMM registers(16..31) for double/float usage
 355       int base_addr = XSAVE_AREA_UPPERBANK;
 356       int off = 0;
 357       for (int n = 16; n < num_xmm_regs; n++) {
 358         __ movsd(as_XMMRegister(n), Address(rsp, base_addr+(off++*64)));
 359       }
 360     }
 361   }
 362 
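
Comparing the old and new hunks shows the substance of the vextrinscleanup2 patch: the lane-specific helper names (vextractf128h, vextractf64x4h, vinsertf128h, vinsertf64x4h) give way to the generic instruction forms that always take an explicit imm8 lane selector, and the AVX insert form names both source registers. A rough, compilable sketch of the two shapes, using stub types; these are abbreviated, hypothetical signatures, not the real Assembler declarations:

#include <cstdint>

struct Address {};       // stub standing in for the real HotSpot Address
struct XMMRegister {};   // stub standing in for the real HotSpot XMMRegister

struct AssemblerSketch {
  // Old style: the lane is baked into the helper name ("h" = high lane).
  void vextractf128h(Address dst, XMMRegister src) {}
  void vinsertf128h(XMMRegister dst, Address src) {}

  // New style: one entry point per instruction, lane as an explicit imm8.
  void vextractf128(Address dst, XMMRegister src, uint8_t imm8) {}
  // The insert names both sources; the call sites above pass the same
  // register for dst and nds, so the other lane is preserved in place.
  void vinsertf128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) {}
};

int main() {
  AssemblerSketch masm;
  Address mem;
  XMMRegister xmm;
  masm.vextractf128h(mem, xmm);       // old: high lane implied by the name
  masm.vextractf128(mem, xmm, 1);     // new: lane 1 spelled out
  masm.vinsertf128(xmm, xmm, mem, 1); // new: dst == nds at these call sites
  return 0;
}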

