
src/cpu/x86/vm/x86_64.ad


*** 1046,1088 ****
    return rc_float;
  }

  // Next two methods are shared by 32- and 64-bit VM. They are defined in x86.ad.
  static int vec_mov_helper(CodeBuffer *cbuf, bool do_size, int src_lo, int dst_lo,
!                           int src_hi, int dst_hi, uint ireg, outputStream* st);

  static int vec_spill_helper(CodeBuffer *cbuf, bool do_size, bool is_load,
!                             int stack_offset, int reg, uint ireg, outputStream* st);

  static void vec_stack_to_stack_helper(CodeBuffer *cbuf, int src_offset,
!                                       int dst_offset, uint ireg, outputStream* st) {
    if (cbuf) {
      MacroAssembler _masm(cbuf);
      switch (ireg) {
!     case Op_VecS:
        __ movq(Address(rsp, -8), rax);
        __ movl(rax, Address(rsp, src_offset));
        __ movl(Address(rsp, dst_offset), rax);
        __ movq(rax, Address(rsp, -8));
        break;
!     case Op_VecD:
        __ pushq(Address(rsp, src_offset));
        __ popq (Address(rsp, dst_offset));
        break;
!     case Op_VecX:
        __ pushq(Address(rsp, src_offset));
        __ popq (Address(rsp, dst_offset));
        __ pushq(Address(rsp, src_offset+8));
        __ popq (Address(rsp, dst_offset+8));
        break;
!     case Op_VecY:
        __ vmovdqu(Address(rsp, -32), xmm0);
        __ vmovdqu(xmm0, Address(rsp, src_offset));
        __ vmovdqu(Address(rsp, dst_offset), xmm0);
        __ vmovdqu(xmm0, Address(rsp, -32));
        break;
!     case Op_VecZ:
        __ evmovdquq(Address(rsp, -64), xmm0, 2);
        __ evmovdquq(xmm0, Address(rsp, src_offset), 2);
        __ evmovdquq(Address(rsp, dst_offset), xmm0, 2);
        __ evmovdquq(xmm0, Address(rsp, -64), 2);
        break;
--- 1046,1088 ----
    return rc_float;
  }

  // Next two methods are shared by 32- and 64-bit VM. They are defined in x86.ad.
  static int vec_mov_helper(CodeBuffer *cbuf, bool do_size, int src_lo, int dst_lo,
!                           int src_hi, int dst_hi, Opcodes ireg, outputStream* st);

  static int vec_spill_helper(CodeBuffer *cbuf, bool do_size, bool is_load,
!                             int stack_offset, int reg, Opcodes ireg, outputStream* st);

  static void vec_stack_to_stack_helper(CodeBuffer *cbuf, int src_offset,
!                                       int dst_offset, Opcodes ireg, outputStream* st) {
    if (cbuf) {
      MacroAssembler _masm(cbuf);
      switch (ireg) {
!     case Opcodes::Op_VecS:
        __ movq(Address(rsp, -8), rax);
        __ movl(rax, Address(rsp, src_offset));
        __ movl(Address(rsp, dst_offset), rax);
        __ movq(rax, Address(rsp, -8));
        break;
!     case Opcodes::Op_VecD:
        __ pushq(Address(rsp, src_offset));
        __ popq (Address(rsp, dst_offset));
        break;
!     case Opcodes::Op_VecX:
        __ pushq(Address(rsp, src_offset));
        __ popq (Address(rsp, dst_offset));
        __ pushq(Address(rsp, src_offset+8));
        __ popq (Address(rsp, dst_offset+8));
        break;
!     case Opcodes::Op_VecY:
        __ vmovdqu(Address(rsp, -32), xmm0);
        __ vmovdqu(xmm0, Address(rsp, src_offset));
        __ vmovdqu(Address(rsp, dst_offset), xmm0);
        __ vmovdqu(xmm0, Address(rsp, -32));
        break;
!     case Opcodes::Op_VecZ:
        __ evmovdquq(Address(rsp, -64), xmm0, 2);
        __ evmovdquq(xmm0, Address(rsp, src_offset), 2);
        __ evmovdquq(Address(rsp, dst_offset), xmm0, 2);
        __ evmovdquq(xmm0, Address(rsp, -64), 2);
        break;
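This hunk changes the `ireg` selector of the three spill/copy helpers from a plain `uint` to the scoped `Opcodes` enum, so every vector case label must now be written with the `Opcodes::` qualifier. The emitted instructions are untouched; only the type of the selector changes, so the compiler now rejects accidental mixing of opcode values with unrelated integers. A minimal standalone sketch of the pattern (illustrative names and bodies, not HotSpot code):

    // Sketch: a scoped enum forces qualified case labels.
    #include <cstdio>

    enum class Opcodes { Op_VecS, Op_VecD };    // was: plain integer constants

    static void describe(Opcodes ireg) {        // was: uint ireg
      switch (ireg) {
      case Opcodes::Op_VecS:                    // was: case Op_VecS:
        std::puts("32-bit vector spill");
        break;
      case Opcodes::Op_VecD:                    // was: case Op_VecD:
        std::puts("64-bit vector spill");
        break;
      }
    }

    int main() {
      describe(Opcodes::Op_VecD);               // callers must qualify too
    }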
*** 1090,1126 ****
        ShouldNotReachHere();
      }
  #ifndef PRODUCT
    } else {
      switch (ireg) {
!     case Op_VecS:
        st->print("movq [rsp - #8], rax\t# 32-bit mem-mem spill\n\t"
                  "movl rax, [rsp + #%d]\n\t"
                  "movl [rsp + #%d], rax\n\t"
                  "movq rax, [rsp - #8]",
                  src_offset, dst_offset);
        break;
!     case Op_VecD:
        st->print("pushq [rsp + #%d]\t# 64-bit mem-mem spill\n\t"
                  "popq [rsp + #%d]",
                  src_offset, dst_offset);
        break;
!     case Op_VecX:
        st->print("pushq [rsp + #%d]\t# 128-bit mem-mem spill\n\t"
                  "popq [rsp + #%d]\n\t"
                  "pushq [rsp + #%d]\n\t"
                  "popq [rsp + #%d]",
                  src_offset, dst_offset, src_offset+8, dst_offset+8);
        break;
!     case Op_VecY:
        st->print("vmovdqu [rsp - #32], xmm0\t# 256-bit mem-mem spill\n\t"
                  "vmovdqu xmm0, [rsp + #%d]\n\t"
                  "vmovdqu [rsp + #%d], xmm0\n\t"
                  "vmovdqu xmm0, [rsp - #32]",
                  src_offset, dst_offset);
        break;
!     case Op_VecZ:
        st->print("vmovdqu [rsp - #64], xmm0\t# 512-bit mem-mem spill\n\t"
                  "vmovdqu xmm0, [rsp + #%d]\n\t"
                  "vmovdqu [rsp + #%d], xmm0\n\t"
                  "vmovdqu xmm0, [rsp - #64]",
                  src_offset, dst_offset);
--- 1090,1126 ----
        ShouldNotReachHere();
      }
  #ifndef PRODUCT
    } else {
      switch (ireg) {
!     case Opcodes::Op_VecS:
        st->print("movq [rsp - #8], rax\t# 32-bit mem-mem spill\n\t"
                  "movl rax, [rsp + #%d]\n\t"
                  "movl [rsp + #%d], rax\n\t"
                  "movq rax, [rsp - #8]",
                  src_offset, dst_offset);
        break;
!     case Opcodes::Op_VecD:
        st->print("pushq [rsp + #%d]\t# 64-bit mem-mem spill\n\t"
                  "popq [rsp + #%d]",
                  src_offset, dst_offset);
        break;
!     case Opcodes::Op_VecX:
        st->print("pushq [rsp + #%d]\t# 128-bit mem-mem spill\n\t"
                  "popq [rsp + #%d]\n\t"
                  "pushq [rsp + #%d]\n\t"
                  "popq [rsp + #%d]",
                  src_offset, dst_offset, src_offset+8, dst_offset+8);
        break;
!     case Opcodes::Op_VecY:
        st->print("vmovdqu [rsp - #32], xmm0\t# 256-bit mem-mem spill\n\t"
                  "vmovdqu xmm0, [rsp + #%d]\n\t"
                  "vmovdqu [rsp + #%d], xmm0\n\t"
                  "vmovdqu xmm0, [rsp - #32]",
                  src_offset, dst_offset);
        break;
!     case Opcodes::Op_VecZ:
        st->print("vmovdqu [rsp - #64], xmm0\t# 512-bit mem-mem spill\n\t"
                  "vmovdqu xmm0, [rsp + #%d]\n\t"
                  "vmovdqu [rsp + #%d], xmm0\n\t"
                  "vmovdqu xmm0, [rsp - #64]",
                  src_offset, dst_offset);
*** 1154,1166 ****
    if (src_first == dst_first && src_second == dst_second) {
      // Self copy, no move
      return 0;
    }
    if (bottom_type()->isa_vect() != NULL) {
!     uint ireg = ideal_reg();
      assert((src_first_rc != rc_int && dst_first_rc != rc_int), "sanity");
!     assert((ireg == Op_VecS || ireg == Op_VecD || ireg == Op_VecX || ireg == Op_VecY || ireg == Op_VecZ ), "sanity");
      if( src_first_rc == rc_stack && dst_first_rc == rc_stack ) {
        // mem -> mem
        int src_offset = ra_->reg2offset(src_first);
        int dst_offset = ra_->reg2offset(dst_first);
        vec_stack_to_stack_helper(cbuf, src_offset, dst_offset, ireg, st);
--- 1154,1166 ----
    if (src_first == dst_first && src_second == dst_second) {
      // Self copy, no move
      return 0;
    }
    if (bottom_type()->isa_vect() != NULL) {
!     Opcodes ireg = ideal_reg();
      assert((src_first_rc != rc_int && dst_first_rc != rc_int), "sanity");
!     assert((ireg == Opcodes::Op_VecS || ireg == Opcodes::Op_VecD || ireg == Opcodes::Op_VecX || ireg == Opcodes::Op_VecY || ireg == Opcodes::Op_VecZ ), "sanity");
      if( src_first_rc == rc_stack && dst_first_rc == rc_stack ) {
        // mem -> mem
        int src_offset = ra_->reg2offset(src_first);
        int dst_offset = ra_->reg2offset(dst_first);
        vec_stack_to_stack_helper(cbuf, src_offset, dst_offset, ireg, st);
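Here `ideal_reg()` now returns `Opcodes` rather than `uint`. Note that equality comparisons between two values of the same scoped enum still compile without casts, which is why the sanity assert only needs qualified names; it is only the conversion to an integer that becomes explicit. A small sketch (the underlying type and member set are illustrative assumptions):

    enum class Opcodes : unsigned { Op_VecS, Op_VecD, Op_VecX };

    bool is_small_vec(Opcodes ireg) {
      // Same-type comparisons need no cast ...
      return ireg == Opcodes::Op_VecS || ireg == Opcodes::Op_VecD;
    }

    unsigned raw(Opcodes ireg) {
      // ... but conversion to an integer must now be spelled out;
      // 'unsigned raw = ireg;' would not compile.
      return static_cast<unsigned>(ireg);
    }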
*** 2818,2853 ****
  %}

  // Location of compiled Java return values. Same as C for now.
  return_value
  %{
!   assert(ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values");

!   static const int lo[Op_RegL + 1] = {
      0,
      0,
      RAX_num,  // Op_RegN
      RAX_num,  // Op_RegI
      RAX_num,  // Op_RegP
      XMM0_num, // Op_RegF
      XMM0_num, // Op_RegD
      RAX_num   // Op_RegL
    };
!   static const int hi[Op_RegL + 1] = {
      0,
      0,
      OptoReg::Bad, // Op_RegN
      OptoReg::Bad, // Op_RegI
      RAX_H_num,    // Op_RegP
      OptoReg::Bad, // Op_RegF
      XMM0b_num,    // Op_RegD
      RAX_H_num     // Op_RegL
    };
    // Excluded flags and vector registers.
!   assert(ARRAY_SIZE(hi) == _last_machine_leaf - 6, "missing type");
!   return OptoRegPair(hi[ideal_reg], lo[ideal_reg]);
  %}
  %}

  //----------ATTRIBUTES---------------------------------------------------------

  //----------Operand Attributes-------------------------------------------------
--- 2818,2853 ----
  %}

  // Location of compiled Java return values. Same as C for now.
  return_value
  %{
!   assert(ideal_reg >= Opcodes::Op_RegI && ideal_reg <= Opcodes::Op_RegL, "only return normal values");

!   static const int lo[static_cast<uint>(Opcodes::Op_RegL) + 1] = {
      0,
      0,
      RAX_num,  // Op_RegN
      RAX_num,  // Op_RegI
      RAX_num,  // Op_RegP
      XMM0_num, // Op_RegF
      XMM0_num, // Op_RegD
      RAX_num   // Op_RegL
    };
!   static const int hi[static_cast<uint>(Opcodes::Op_RegL) + 1] = {
      0,
      0,
      OptoReg::Bad, // Op_RegN
      OptoReg::Bad, // Op_RegI
      RAX_H_num,    // Op_RegP
      OptoReg::Bad, // Op_RegF
      XMM0b_num,    // Op_RegD
      RAX_H_num     // Op_RegL
    };
    // Excluded flags and vector registers.
!   assert(ARRAY_SIZE(hi) == static_cast<uint>(Opcodes::_last_machine_leaf) - 6, "missing type");
!   return OptoRegPair(hi[static_cast<uint>(ideal_reg)], lo[static_cast<uint>(ideal_reg)]);
  %}
  %}

  //----------ATTRIBUTES---------------------------------------------------------

  //----------Operand Attributes-------------------------------------------------
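The `return_value` block is where the loss of implicit integer conversion actually bites: the enum is used both to size the `lo`/`hi` tables and to index into them, and each use needs a `static_cast<uint>`. A sketch of just that mechanic, with placeholder register numbers standing in for HotSpot's `RAX_num`/`XMM0_num` values:

    #include <cassert>

    // Illustrative subset; in the patch these are members of HotSpot's Opcodes enum.
    enum class Opcodes : unsigned { Op_RegN = 2, Op_RegI, Op_RegP, Op_RegF, Op_RegD, Op_RegL };

    // A scoped enum cannot size an array implicitly, hence the cast in the bound.
    static const int lo[static_cast<unsigned>(Opcodes::Op_RegL) + 1] = {
      0, 0,
      1, // Op_RegN (placeholder for RAX_num)
      1, // Op_RegI
      1, // Op_RegP
      2, // Op_RegF (placeholder for XMM0_num)
      2, // Op_RegD
      1  // Op_RegL
    };

    int low_reg(Opcodes ideal_reg) {
      assert(ideal_reg >= Opcodes::Op_RegI && ideal_reg <= Opcodes::Op_RegL);
      // lo[ideal_reg] no longer compiles; indexing needs an explicit cast too.
      return lo[static_cast<unsigned>(ideal_reg)];
    }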