
src/cpu/x86/vm/sharedRuntime_x86_32.cpp


*** 39,48 ****
--- 39,49 ----
  #include "c1/c1_Runtime1.hpp"
  #endif
  #ifdef COMPILER2
  #include "opto/runtime.hpp"
  #endif
+ #include "vm_version_x86.hpp"
  
  #define __ masm->
  
  const int StackAlignmentInSlots = StackAlignmentInBytes / VMRegImpl::stack_slot_size;
*** 118,129 ****
  int num_xmm_regs = XMMRegisterImpl::number_of_registers;
  int ymm_bytes = num_xmm_regs * 16;
  int zmm_bytes = num_xmm_regs * 32;
  #ifdef COMPILER2
  if (save_vectors) {
!   assert(UseAVX > 0, "up to 512bit vectors are supported with EVEX");
!   assert(MaxVectorSize <= 64, "up to 512bit vectors are supported now");
    // Save upper half of YMM registers
    int vect_bytes = ymm_bytes;
    if (UseAVX > 2) {
      // Save upper half of ZMM registers as well
      vect_bytes += zmm_bytes;
--- 119,130 ----
  int num_xmm_regs = XMMRegisterImpl::number_of_registers;
  int ymm_bytes = num_xmm_regs * 16;
  int zmm_bytes = num_xmm_regs * 32;
  #ifdef COMPILER2
  if (save_vectors) {
!   assert(UseAVX > 0, "Vectors larger than 16 byte long are supported only with AVX");
!   assert(MaxVectorSize <= 64, "Only up to 64 byte long vectors are supported");
    // Save upper half of YMM registers
    int vect_bytes = ymm_bytes;
    if (UseAVX > 2) {
      // Save upper half of ZMM registers as well
      vect_bytes += zmm_bytes;
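
The asserts above bound the extra save area at 64-byte (512-bit) registers. As a rough, illustrative check (not part of the change, and assuming num_xmm_regs == 8, the XMM register count in 32-bit mode), the ymm_bytes/zmm_bytes arithmetic used here works out as follows:

    // Illustrative sizing only; mirrors the ymm_bytes/zmm_bytes computation above.
    // Assumes num_xmm_regs == 8, as on 32-bit x86.
    static int vector_save_bytes(int num_xmm_regs, bool use_avx512) {
      int ymm_bytes  = num_xmm_regs * 16;  // upper 128 bits of each YMM: 8 * 16 = 128 bytes
      int zmm_bytes  = num_xmm_regs * 32;  // upper 256 bits of each ZMM: 8 * 32 = 256 bytes
      int vect_bytes = ymm_bytes;          // AVX/AVX2 (UseAVX <= 2): YMM upper halves only
      if (use_avx512) {                    // UseAVX > 2: also save ZMM upper halves
        vect_bytes += zmm_bytes;           // 128 + 256 = 384 bytes in total
      }
      return vect_bytes;
    }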
*** 217,226 ****
--- 218,228 ----
      for (int n = 0; n < num_xmm_regs; n++) {
        __ vextractf64x4_high(Address(rsp, n*32), as_XMMRegister(n));
      }
    }
  }
+ __ vzeroupper();
  
  // Set an oopmap for the call site. This oopmap will map all
  // oop-registers and debug-info registers as callee-saved. This
  // will allow deoptimization at this safepoint to find all possible
  // debug-info recordings, as well as let GC find all oops.
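
The vzeroupper() emitted here clears the dirty upper state of the YMM/ZMM registers before the blob calls into runtime code that may execute legacy SSE instructions, which avoids the AVX-to-SSE transition penalty. The new vm_version_x86.hpp include in the first hunk is consistent with the emission being gated on a CPU-feature query; a minimal sketch of such a guarded wrapper, assuming a VM_Version::supports_vzeroupper() predicate (the wrapper body and predicate are assumptions, not shown in this webrev):

    // Sketch only -- not part of this file. Assumes VM_Version::supports_vzeroupper()
    // exists; the intent is that the instruction is emitted only on CPUs where the
    // AVX-SSE transition penalty is actually a concern.
    void MacroAssembler::vzeroupper() {
      if (VM_Version::supports_vzeroupper()) {
        Assembler::vzeroupper();   // zero bits 128 and above of all YMM/ZMM registers
      }
    }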
*** 267,278 ****
  int zmm_bytes = num_xmm_regs * 32;
  // Recover XMM & FPU state
  int additional_frame_bytes = 0;
  #ifdef COMPILER2
  if (restore_vectors) {
!   assert(UseAVX > 0, "up to 512bit vectors are supported with EVEX");
!   assert(MaxVectorSize <= 64, "up to 512bit vectors are supported now");
    // Save upper half of YMM registers
    additional_frame_bytes = ymm_bytes;
    if (UseAVX > 2) {
      // Save upper half of ZMM registers as well
      additional_frame_bytes += zmm_bytes;
--- 269,280 ----
  int zmm_bytes = num_xmm_regs * 32;
  // Recover XMM & FPU state
  int additional_frame_bytes = 0;
  #ifdef COMPILER2
  if (restore_vectors) {
!   assert(UseAVX > 0, "Vectors larger than 16 byte long are supported only with AVX");
!   assert(MaxVectorSize <= 64, "Only up to 64 byte long vectors are supported");
    // Save upper half of YMM registers
    additional_frame_bytes = ymm_bytes;
    if (UseAVX > 2) {
      // Save upper half of ZMM registers as well
      additional_frame_bytes += zmm_bytes;
*** 283,292 ****
--- 285,296 ----
  #endif
  
  int off = xmm0_off;
  int delta = xmm1_off - off;
  
+ __ vzeroupper();
+ 
  if (UseSSE == 1) {
    // Restore XMM registers
    assert(additional_frame_bytes == 0, "");
    for (int n = 0; n < num_xmm_regs; n++) {
      __ movflt(as_XMMRegister(n), Address(rsp, off*wordSize));
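
For the UseSSE == 1 path shown above, the restore walks consecutive XMM save slots with a fixed stride; a condensed restatement of that loop, using the names from the hunk (the off += delta step is implied by the delta computation but falls outside the hunk):

    // Condensed form of the UseSSE == 1 restore loop; off, delta and num_xmm_regs are
    // the names from the hunk above, and wordSize is 4 on x86_32.
    int off   = xmm0_off;
    int delta = xmm1_off - off;               // distance between consecutive XMM slots
    for (int n = 0; n < num_xmm_regs; n++) {
      __ movflt(as_XMMRegister(n), Address(rsp, off * wordSize));  // low 32 bits only
      off += delta;                           // advance to the next register's slot
    }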
*** 2121,2130 ****
--- 2125,2136 ----
  // and never return here preventing us from clearing _last_native_pc down below.
  // Also can't use call_VM_leaf either as it will check to see if rsi & rdi are
  // preserved and correspond to the bcp/locals pointers. So we do a runtime call
  // by hand.
  //
+ __ vzeroupper();
+ 
  save_native_result(masm, ret_type, stack_slots);
  __ push(thread);
  if (!is_critical_native) {
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
*** 2305,2315 ****
  
  // END Slow path lock
  
  // BEGIN Slow path unlock
  __ bind(slow_path_unlock);
! 
  // Slow path unlock
  
  if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
    save_native_result(masm, ret_type, stack_slots);
  }
--- 2311,2321 ----
  
  // END Slow path lock
  
  // BEGIN Slow path unlock
  __ bind(slow_path_unlock);
! __ vzeroupper();
  // Slow path unlock
  
  if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
    save_native_result(masm, ret_type, stack_slots);
  }
*** 2350,2359 ****
--- 2356,2366 ----
  }
  
  // SLOW PATH Reguard the stack if needed
  __ bind(reguard);
+ __ vzeroupper();
  save_native_result(masm, ret_type, stack_slots);
  {
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
  }
  restore_native_result(masm, ret_type, stack_slots);
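
The three native-wrapper sites touched above (the native-transition check, the slow-path unlock, and the stack reguard) now share the same shape; schematically, with runtime_entry standing in for whichever VM entry point the particular path calls (a placeholder name, not a symbol in this file):

    // Schematic of the updated slow-path call pattern; 'runtime_entry' is illustrative.
    __ vzeroupper();                                     // drop the dirty AVX upper state before any SSE code runs
    save_native_result(masm, ret_type, stack_slots);     // protect the Java-visible return value
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, runtime_entry)));
    restore_native_result(masm, ret_type, stack_slots);  // bring the return value back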