
src/cpu/x86/vm/sharedRuntime_x86_64.cpp


*** 45,54 ****
--- 45,55 ----
  #include "opto/runtime.hpp"
  #endif
  #if INCLUDE_JVMCI
  #include "jvmci/jvmciJavaClasses.hpp"
  #endif
+ #include "vm_version_x86.hpp"
  
  #define __ masm->
  
  const int StackAlignmentInSlots = StackAlignmentInBytes / VMRegImpl::stack_slot_size;
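The newly included vm_version_x86.hpp exposes the VM_Version CPU-feature queries to this file; presumably the intent is that code touching the AVX register file (and the vzeroupper emission added below) is driven by what the target CPU actually supports. As a loose, hypothetical illustration of that kind of feature gating in plain C++ (maybe_zeroupper and zeroupper_avx are made-up names, not HotSpot code):

#include <immintrin.h>

// Hypothetical sketch, not HotSpot code: only execute VZEROUPPER when the CPU
// supports AVX, mirroring the kind of feature test that vm_version_x86.hpp
// makes available to the code generator.
__attribute__((target("avx")))
static void zeroupper_avx() {
  _mm256_zeroupper();
}

static inline void maybe_zeroupper() {
  if (__builtin_cpu_supports("avx")) {   // GCC/Clang builtin CPU feature test
    zeroupper_avx();
  }
}

int main() {
  maybe_zeroupper();   // safe to call on any x86-64 CPU
  return 0;
}

The target("avx") attribute keeps the intrinsic legal in a translation unit that is otherwise built without -mavx.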
*** 149,160 ****
  if (UseAVX < 3) {
    num_xmm_regs = num_xmm_regs/2;
  }
  #if defined(COMPILER2) || INCLUDE_JVMCI
  if (save_vectors) {
!   assert(UseAVX > 0, "up to 512bit vectors are supported with EVEX");
!   assert(MaxVectorSize <= 64, "up to 512bit vectors are supported now");
  }
  #else
  assert(!save_vectors, "vectors are generated only by C2 and JVMCI");
  #endif
--- 150,161 ----
  if (UseAVX < 3) {
    num_xmm_regs = num_xmm_regs/2;
  }
  #if defined(COMPILER2) || INCLUDE_JVMCI
  if (save_vectors) {
!   assert(UseAVX > 0, "Vectors larger than 16 byte long are supported only with AVX");
!   assert(MaxVectorSize <= 64, "Only up to 64 byte long vectors are supported");
  }
  #else
  assert(!save_vectors, "vectors are generated only by C2 and JVMCI");
  #endif
*** 204,213 ****
--- 205,215 ----
        for (int n = 16; n < num_xmm_regs; n++) {
          __ movsd(Address(rsp, base_addr+(off++*64)), as_XMMRegister(n));
        }
      }
    }
+ __ vzeroupper();
  if (frame::arg_reg_save_area_bytes != 0) {
    // Allocate argument register save area
    __ subptr(rsp, frame::arg_reg_save_area_bytes);
  }
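The __ vzeroupper() emitted here, and at the analogous points further down, appears to address the AVX-to-SSE transition penalty: once 256-bit AVX instructions have executed, the upper halves of the YMM registers are left dirty, and subsequent legacy (non-VEX) SSE instructions can run noticeably slower on several Intel microarchitectures until VZEROUPPER or VZEROALL clears that state. A minimal standalone sketch of the same pattern using C++ intrinsics (the function names are illustrative only; build with -mavx):

#include <immintrin.h>

// Hypothetical example: sum floats with 256-bit AVX, then execute VZEROUPPER
// before handing control to a callee that may be compiled with legacy
// (non-VEX) SSE encodings, avoiding the AVX->SSE transition penalty.
float sum_then_call(const float* a, int n, float (*legacy_sse_fn)(float)) {
  __m256 acc = _mm256_setzero_ps();
  for (int i = 0; i + 8 <= n; i += 8) {
    acc = _mm256_add_ps(acc, _mm256_loadu_ps(a + i));   // 256-bit AVX work
  }
  // Horizontal reduction of the 8 lanes to a scalar.
  __m128 lo = _mm256_castps256_ps128(acc);
  __m128 hi = _mm256_extractf128_ps(acc, 1);
  __m128 s  = _mm_add_ps(lo, hi);
  s = _mm_hadd_ps(s, s);
  s = _mm_hadd_ps(s, s);
  float partial = _mm_cvtss_f32(s);

  _mm256_zeroupper();              // analogous to the __ vzeroupper() above
  return legacy_sse_fn(partial);   // callee may execute legacy SSE code
}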
*** 320,336 ****
    __ addptr(rsp, frame::arg_reg_save_area_bytes);
  }
  
  #if defined(COMPILER2) || INCLUDE_JVMCI
  if (restore_vectors) {
!   assert(UseAVX > 0, "up to 512bit vectors are supported with EVEX");
!   assert(MaxVectorSize <= 64, "up to 512bit vectors are supported now");
  }
  #else
  assert(!restore_vectors, "vectors are generated only by C2");
  #endif
  
  // On EVEX enabled targets everything is handled in pop fpu state
  if (restore_vectors) {
    // Restore upper half of YMM registers (0..15)
    int base_addr = XSAVE_AREA_YMM_BEGIN;
    for (int n = 0; n < 16; n++) {
--- 322,340 ----
    __ addptr(rsp, frame::arg_reg_save_area_bytes);
  }
  
  #if defined(COMPILER2) || INCLUDE_JVMCI
  if (restore_vectors) {
!   assert(UseAVX > 0, "Vectors larger than 16 byte long are supported only with AVX");
!   assert(MaxVectorSize <= 64, "Only up to 64 byte long vectors are supported");
  }
  #else
  assert(!restore_vectors, "vectors are generated only by C2");
  #endif
  
+ __ vzeroupper();
+ 
  // On EVEX enabled targets everything is handled in pop fpu state
  if (restore_vectors) {
    // Restore upper half of YMM registers (0..15)
    int base_addr = XSAVE_AREA_YMM_BEGIN;
    for (int n = 0; n < 16; n++) {
*** 526,536 ****
  __ movptr(rax, Address(rsp, 0));
  
  // align stack so push_CPU_state doesn't fault
  __ andptr(rsp, -(StackAlignmentInBytes));
  __ push_CPU_state();
! 
  // VM needs caller's callsite
  // VM needs target method
  // This needs to be a long call since we will relocate this adapter to
  // the codeBuffer and it may not reach
--- 530,540 ----
  __ movptr(rax, Address(rsp, 0));
  
  // align stack so push_CPU_state doesn't fault
  __ andptr(rsp, -(StackAlignmentInBytes));
  __ push_CPU_state();
! __ vzeroupper();
  // VM needs caller's callsite
  // VM needs target method
  // This needs to be a long call since we will relocate this adapter to
  // the codeBuffer and it may not reach
*** 545,554 ****
--- 549,559 ----
  // De-allocate argument register save area
  if (frame::arg_reg_save_area_bytes != 0) {
    __ addptr(rsp, frame::arg_reg_save_area_bytes);
  }
+ __ vzeroupper();
  __ pop_CPU_state();
  // restore sp
  __ mov(rsp, r13);
  __ bind(L);
  }
*** 1463,1473 ****
  __ reset_last_Java_frame(false);
  
  save_or_restore_arguments(masm, stack_slots, total_in_args,
                            arg_save_area, NULL, in_regs, in_sig_bt);
- 
  __ bind(cont);
  #ifdef ASSERT
  if (StressCriticalJNINatives) {
    // Stress register saving
    OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
--- 1468,1477 ----
*** 2483,2492 ****
--- 2487,2497 ----
  // and never return here preventing us from clearing _last_native_pc down below.
  // Also can't use call_VM_leaf either as it will check to see if rsi & rdi are
  // preserved and correspond to the bcp/locals pointers. So we do a runtime call
  // by hand.
  //
+ __ vzeroupper();
  save_native_result(masm, ret_type, stack_slots);
  __ mov(c_rarg0, r15_thread);
  __ mov(r12, rsp); // remember sp
  __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
  __ andptr(rsp, -16); // align stack as required by ABI
*** 2659,2669 ****
  // BEGIN Slow path unlock
  __ bind(slow_path_unlock);
  
  // If we haven't already saved the native result we must save it now as xmm registers
  // are still exposed.
! 
  if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
    save_native_result(masm, ret_type, stack_slots);
  }
  
  __ lea(c_rarg1, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
--- 2664,2674 ----
  // BEGIN Slow path unlock
  __ bind(slow_path_unlock);
  
  // If we haven't already saved the native result we must save it now as xmm registers
  // are still exposed.
! __ vzeroupper();
  if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
    save_native_result(masm, ret_type, stack_slots);
  }
  
  __ lea(c_rarg1, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
*** 2705,2714 ****
--- 2710,2720 ----
  } // synchronized
  
  // SLOW PATH Reguard the stack if needed
  __ bind(reguard);
+ __ vzeroupper();
  save_native_result(masm, ret_type, stack_slots);
  __ mov(r12, rsp); // remember sp
  __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
  __ andptr(rsp, -16); // align stack as required by ABI
  __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
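To get a feel for the penalty these vzeroupper insertions are presumably avoiding, a rough, hypothetical micro-benchmark along the following lines can be used (not part of this change; dirty_ymm_state, sse_work and time_case are made-up names, and the size of the effect varies a lot between microarchitectures, with some CPUs showing little or none). Build with g++ -O2:

#include <immintrin.h>
#include <chrono>
#include <cstdio>

static float g_buf[8];

// Any 256-bit AVX instruction leaves the upper YMM halves dirty.
__attribute__((target("avx")))
static void dirty_ymm_state() {
  _mm256_storeu_ps(g_buf, _mm256_set1_ps(1.0f));
}

__attribute__((target("avx")))
static void clean_ymm_state() {
  _mm256_zeroupper();   // the vzeroupper analogue
}

// Legacy-SSE work: this function is compiled without AVX enabled, so it uses
// non-VEX (legacy SSE) encodings.
__attribute__((noinline))
static float sse_work(float x) {
  __m128 v = _mm_set1_ps(x);
  for (int i = 0; i < 1000; i++) {
    v = _mm_add_ps(v, _mm_set1_ps(0.5f));
  }
  return _mm_cvtss_f32(v);
}

static double time_case(bool zero_upper) {
  float sink = 0.0f;
  auto t0 = std::chrono::steady_clock::now();
  for (int i = 0; i < 100000; i++) {
    dirty_ymm_state();
    if (zero_upper) {
      clean_ymm_state();
    }
    sink += sse_work(1.0f);
  }
  auto t1 = std::chrono::steady_clock::now();
  std::printf("  (sink=%g)\n", sink);
  return std::chrono::duration<double>(t1 - t0).count();
}

int main() {
  std::printf("without vzeroupper: %.3f s\n", time_case(false));
  std::printf("with    vzeroupper: %.3f s\n", time_case(true));
  return 0;
}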