
src/cpu/x86/vm/sharedRuntime_x86_64.cpp

*** 45,54 ****
--- 45,55 ----
  #include "opto/runtime.hpp"
  #endif
  #if INCLUDE_JVMCI
  #include "jvmci/jvmciJavaClasses.hpp"
  #endif
+ #include "vm_version_x86.hpp"
  
  #define __ masm->
  
  const int StackAlignmentInSlots = StackAlignmentInBytes / VMRegImpl::stack_slot_size;
*** 149,160 ****
    if (UseAVX < 3) {
      num_xmm_regs = num_xmm_regs/2;
    }
  #if defined(COMPILER2) || INCLUDE_JVMCI
    if (save_vectors) {
!     assert(UseAVX > 0, "up to 512bit vectors are supported with EVEX");
!     assert(MaxVectorSize <= 64, "up to 512bit vectors are supported now");
    }
  #else
    assert(!save_vectors, "vectors are generated only by C2 and JVMCI");
  #endif
--- 150,161 ----
    if (UseAVX < 3) {
      num_xmm_regs = num_xmm_regs/2;
    }
  #if defined(COMPILER2) || INCLUDE_JVMCI
    if (save_vectors) {
!     assert(UseAVX > 0, "Vectors larger than 16 byte long are supported only with AVX");
!     assert(MaxVectorSize <= 64, "Only up to 64 byte long vectors are supported");
    }
  #else
    assert(!save_vectors, "vectors are generated only by C2 and JVMCI");
  #endif
*** 204,213 ****
--- 205,217 ----
        for (int n = 16; n < num_xmm_regs; n++) {
          __ movsd(Address(rsp, base_addr+(off++*64)), as_XMMRegister(n));
        }
      }
    }
+   if (VM_Version::supports_avx()) {
+     __ vzeroupper();
+   }
    if (frame::arg_reg_save_area_bytes != 0) {
      // Allocate argument register save area
      __ subptr(rsp, frame::arg_reg_save_area_bytes);
    }
*** 320,336 ****
      __ addptr(rsp, frame::arg_reg_save_area_bytes);
    }
  
  #if defined(COMPILER2) || INCLUDE_JVMCI
    if (restore_vectors) {
!     assert(UseAVX > 0, "up to 512bit vectors are supported with EVEX");
!     assert(MaxVectorSize <= 64, "up to 512bit vectors are supported now");
    }
  #else
    assert(!restore_vectors, "vectors are generated only by C2");
  #endif
  
    // On EVEX enabled targets everything is handled in pop fpu state
    if (restore_vectors) {
      // Restore upper half of YMM registers (0..15)
      int base_addr = XSAVE_AREA_YMM_BEGIN;
      for (int n = 0; n < 16; n++) {
--- 324,344 ----
      __ addptr(rsp, frame::arg_reg_save_area_bytes);
    }
  
  #if defined(COMPILER2) || INCLUDE_JVMCI
    if (restore_vectors) {
!     assert(UseAVX > 0, "Vectors larger than 16 byte long are supported only with AVX");
!     assert(MaxVectorSize <= 64, "Only up to 64 byte long vectors are supported");
    }
  #else
    assert(!restore_vectors, "vectors are generated only by C2");
  #endif
+   if (VM_Version::supports_avx()) {
+     __ vzeroupper();
+   }
+ 
    // On EVEX enabled targets everything is handled in pop fpu state
    if (restore_vectors) {
      // Restore upper half of YMM registers (0..15)
      int base_addr = XSAVE_AREA_YMM_BEGIN;
      for (int n = 0; n < 16; n++) {
*** 527,536 ****
--- 535,547 ----
    // align stack so push_CPU_state doesn't fault
    __ andptr(rsp, -(StackAlignmentInBytes));
    __ push_CPU_state();
  
+   if (VM_Version::supports_avx()) {
+     __ vzeroupper();
+   }
    // VM needs caller's callsite
    // VM needs target method
    // This needs to be a long call since we will relocate this adapter to
    // the codeBuffer and it may not reach
*** 545,554 ****
--- 556,568 ----
    // De-allocate argument register save area
    if (frame::arg_reg_save_area_bytes != 0) {
      __ addptr(rsp, frame::arg_reg_save_area_bytes);
    }
+   if (VM_Version::supports_avx()) {
+     __ vzeroupper();
+   }
    __ pop_CPU_state();
    // restore sp
    __ mov(rsp, r13);
    __ bind(L);
  }
*** 1463,1473 ****
    __ reset_last_Java_frame(false);
  
    save_or_restore_arguments(masm, stack_slots, total_in_args,
                              arg_save_area, NULL, in_regs, in_sig_bt);
-   __ bind(cont);
  
  #ifdef ASSERT
    if (StressCriticalJNINatives) {
      // Stress register saving
      OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
--- 1477,1486 ----
*** 2483,2492 ****
--- 2496,2508 ----
    // and never return here preventing us from clearing _last_native_pc down below.
    // Also can't use call_VM_leaf either as it will check to see if rsi & rdi are
    // preserved and correspond to the bcp/locals pointers. So we do a runtime call
    // by hand.
    //
+   if (VM_Version::supports_avx()) {
+     __ vzeroupper();
+   }
    save_native_result(masm, ret_type, stack_slots);
    __ mov(c_rarg0, r15_thread);
    __ mov(r12, rsp); // remember sp
    __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
    __ andptr(rsp, -16); // align stack as required by ABI
*** 2659,2668 ****
--- 2675,2687 ----
    // BEGIN Slow path unlock
    __ bind(slow_path_unlock);
  
    // If we haven't already saved the native result we must save it now as xmm registers
    // are still exposed.
+   if (VM_Version::supports_avx()) {
+     __ vzeroupper();
+   }
    if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
      save_native_result(masm, ret_type, stack_slots);
    }
*** 2705,2714 ****
--- 2724,2736 ----
    } // synchronized
  
    // SLOW PATH Reguard the stack if needed
    __ bind(reguard);
+   if (VM_Version::supports_avx()) {
+     __ vzeroupper();
+   }
    save_native_result(masm, ret_type, stack_slots);
    __ mov(r12, rsp); // remember sp
    __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
    __ andptr(rsp, -16); // align stack as required by ABI
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
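
Note on the recurring pattern in the hunks above: every added block emits vzeroupper, guarded by VM_Version::supports_avx(), just before the generated code hands control to VM runtime or native helpers (and on the register save/restore paths). vzeroupper clears the upper bits of the YMM registers, so legacy SSE-encoded code reached through those calls does not pay the AVX-to-SSE transition penalty; the webrev itself states no rationale, so treat that reading as an inference. The standalone sketch below is not part of this change: it shows the same idea at the C++ intrinsics level, where sum8 and its data are made up for illustration and _mm256_zeroupper() is the compiler intrinsic that emits vzeroupper (build with -mavx).

  #include <immintrin.h>   // AVX intrinsics (illustrative sketch, not HotSpot code)
  #include <cstdio>

  // Hypothetical helper: does 256-bit AVX work, then clears the upper YMM
  // state before returning, so SSE-encoded code that runs afterwards avoids
  // the AVX->SSE transition penalty.
  static float sum8(const float* a, const float* b) {
    __m256 v = _mm256_add_ps(_mm256_loadu_ps(a), _mm256_loadu_ps(b));
    float out[8];
    _mm256_storeu_ps(out, v);
    _mm256_zeroupper();          // same effect as the generated "__ vzeroupper()" above
    float s = 0.0f;
    for (int i = 0; i < 8; i++) s += out[i];
    return s;
  }

  int main() {
    float a[8] = {1, 2, 3, 4, 5, 6, 7, 8};
    float b[8] = {8, 7, 6, 5, 4, 3, 2, 1};
    std::printf("%f\n", sum8(a, b));   // prints 72.000000
    return 0;
  }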