
src/cpu/x86/vm/x86_64.ad


*** 534,544 ****
  #define RELOC_DISP32 Assembler::disp32_operand
  
  #define __ _masm.
  
  static int clear_avx_size() {
!   return (Compile::current()->max_vector_size() > 16) ? 3 : 0;  // vzeroupper
  }
  
  // !!!!! Special hack to get all types of calls to specify the byte offset
  //       from the start of the call to the point where the return address
  //       will point.
--- 534,544 ----
  #define RELOC_DISP32 Assembler::disp32_operand
  
  #define __ _masm.
  
  static int clear_avx_size() {
!   return (VM_Version::supports_vzeroupper()) ? 3 : 0;  // vzeroupper
  }
  
  // !!!!! Special hack to get all types of calls to specify the byte offset
  //       from the start of the call to the point where the return address
  //       will point.
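
For context: clear_avx_size() is the size prediction that C2 checks against the bytes actually emitted by the clear_avx encoding class (see the hunk at lines 2090-2105 below); vzeroupper is a fixed three-byte instruction (VEX C5 F8 77), hence the 3. A minimal standalone sketch of that prediction/emission contract, using hypothetical stand-in types (FakeCodeBuffer, a plain supports_vzeroupper flag) rather than HotSpot's real CodeBuffer and VM_Version:

  #include <cassert>

  // Hypothetical stand-ins, only to illustrate the contract this change
  // preserves: the predicate that gates emission must also gate the size
  // estimate, or the "correct size prediction" assert fires.
  struct FakeCodeBuffer {
    int insts = 0;
    int insts_size() const { return insts; }
    void emit_vzeroupper() { insts += 3; }  // vzeroupper is 3 bytes (C5 F8 77)
  };

  static bool supports_vzeroupper = true;   // stands in for VM_Version::supports_vzeroupper()

  static int clear_avx_size() {
    return supports_vzeroupper ? 3 : 0;     // must match what clear_avx emits
  }

  int main() {
    FakeCodeBuffer cbuf;
    int off0 = cbuf.insts_size();
    if (supports_vzeroupper) {
      cbuf.emit_vzeroupper();               // emitted whenever the CPU supports it
    }
    int off1 = cbuf.insts_size();
    assert(off1 - off0 == clear_avx_size() && "correct size prediction");
    return 0;
  }
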
*** 917,927 ****
  //=============================================================================
  #ifndef PRODUCT
  void MachEpilogNode::format(PhaseRegAlloc* ra_, outputStream* st) const
  {
    Compile* C = ra_->C;
!   if (C->max_vector_size() > 16) {
      st->print("vzeroupper");
      st->cr(); st->print("\t");
    }
  
    int framesize = C->frame_size_in_bytes();
--- 917,927 ----
  //=============================================================================
  #ifndef PRODUCT
  void MachEpilogNode::format(PhaseRegAlloc* ra_, outputStream* st) const
  {
    Compile* C = ra_->C;
!   if (VM_Version::supports_vzeroupper()) {
      st->print("vzeroupper");
      st->cr(); st->print("\t");
    }
  
    int framesize = C->frame_size_in_bytes();
*** 953,967 ****
  void MachEpilogNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
  {
    Compile* C = ra_->C;
    MacroAssembler _masm(&cbuf);
  
-   if (C->max_vector_size() > 16) {
      // Clear upper bits of YMM registers when current compiled code uses
      // wide vectors to avoid AVX <-> SSE transition penalty during call.
      __ vzeroupper();
-   }
  
    int framesize = C->frame_size_in_bytes();
    assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
    // Remove word for return adr already pushed
    // and RBP
--- 953,965 ----
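
The comment in that hunk is the heart of the change: 256-bit AVX code leaves the upper halves of the YMM registers dirty, and subsequently executing legacy SSE encodings then costs a state-transition penalty on Intel CPUs; issuing vzeroupper before a call or return avoids it. A minimal user-level illustration with compiler intrinsics (not HotSpot code; assumes an AVX-capable toolchain, e.g. compile with -mavx):

  #include <immintrin.h>

  // After 256-bit AVX work dirties the upper YMM halves, issue vzeroupper
  // before transferring control to code that may use legacy SSE encodings,
  // so the CPU does not pay the AVX <-> SSE transition penalty.
  // a, b, and dst must each point to at least 8 floats.
  void avx_then_call(float* dst, const float* a, const float* b,
                     void (*legacy_sse_callee)()) {
    __m256 va = _mm256_loadu_ps(a);
    __m256 vb = _mm256_loadu_ps(b);
    _mm256_storeu_ps(dst, _mm256_add_ps(va, vb));
    _mm256_zeroupper();     // emits the same vzeroupper this patch generates
    legacy_sse_callee();    // callee can now use SSE without penalty
  }
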
*** 2090,2105 ****
    __ bind(miss);
  %}
  
  enc_class clear_avx %{
    debug_only(int off0 = cbuf.insts_size());
!   if (ra_->C->max_vector_size() > 16) {
      // Clear upper bits of YMM registers when current compiled code uses
      // wide vectors to avoid AVX <-> SSE transition penalty during call.
      MacroAssembler _masm(&cbuf);
      __ vzeroupper();
-   }
    debug_only(int off1 = cbuf.insts_size());
    assert(off1 - off0 == clear_avx_size(), "correct size prediction");
  %}
  
  enc_class Java_To_Runtime(method meth) %{
--- 2088,2102 ----
    __ bind(miss);
  %}
  
  enc_class clear_avx %{
    debug_only(int off0 = cbuf.insts_size());
!   // Clear upper bits of YMM registers to avoid AVX <-> SSE transition penalty
    // Clear upper bits of YMM registers when current compiled code uses
    // wide vectors to avoid AVX <-> SSE transition penalty during call.
    MacroAssembler _masm(&cbuf);
    __ vzeroupper();
    debug_only(int off1 = cbuf.insts_size());
    assert(off1 - off0 == clear_avx_size(), "correct size prediction");
  %}
  
  enc_class Java_To_Runtime(method meth) %{
*** 12114,12124 ****
    match(CallLeafNoFP);
    effect(USE meth);
  
    ins_cost(300);
    format %{ "call_leaf_nofp,runtime " %}
!   ins_encode(Java_To_Runtime(meth));
    ins_pipe(pipe_slow);
  %}
  
  // Return Instruction
  // Remove the return address & jump to it.
--- 12111,12121 ----
    match(CallLeafNoFP);
    effect(USE meth);
  
    ins_cost(300);
    format %{ "call_leaf_nofp,runtime " %}
!   ins_encode(clear_avx, Java_To_Runtime(meth));
    ins_pipe(pipe_slow);
  %}
  
  // Return Instruction
  // Remove the return address & jump to it.