src/cpu/x86/vm/assembler_x86.hpp
*** old/src/cpu/x86/vm/assembler_x86.hpp	Sat Jun  2 20:03:58 2012
--- new/src/cpu/x86/vm/assembler_x86.hpp	Sat Jun  2 20:03:58 2012

*** 1,7 ****
--- 1,7 ----
  /*
! * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
! * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 only, as
  * published by the Free Software Foundation.
*** 589,610 ****
--- 589,614 ----
                   VexSimdPrefix pre, VexOpcode opc,
                   bool vex_w, bool vector256);
  
    void vex_prefix(XMMRegister dst, XMMRegister nds, Address src,
                    VexSimdPrefix pre, bool vector256 = false) {
!     vex_prefix(src, nds->encoding(), dst->encoding(),
!                pre, VEX_OPCODE_0F, false, vector256);
+     int dst_enc = dst->encoding();
+     int nds_enc = nds->is_valid() ? nds->encoding() : 0;
+     vex_prefix(src, nds_enc, dst_enc, pre, VEX_OPCODE_0F, false, vector256);
    }
  
    int  vex_prefix_and_encode(int dst_enc, int nds_enc, int src_enc,
                               VexSimdPrefix pre, VexOpcode opc,
                               bool vex_w, bool vector256);
  
    int  vex_prefix_and_encode(XMMRegister dst, XMMRegister nds, XMMRegister src,
!                              VexSimdPrefix pre, bool vector256 = false) {
!     return vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(),
!                                  pre, VEX_OPCODE_0F, false, vector256);
+                              VexSimdPrefix pre, bool vector256 = false,
+                              VexOpcode opc = VEX_OPCODE_0F) {
+     int src_enc = src->encoding();
+     int dst_enc = dst->encoding();
+     int nds_enc = nds->is_valid() ? nds->encoding() : 0;
+     return vex_prefix_and_encode(dst_enc, nds_enc, src_enc, pre, opc, false, vector256);
    }
  
    void simd_prefix(XMMRegister xreg, XMMRegister nds, Address adr,
                     VexSimdPrefix pre, VexOpcode opc = VEX_OPCODE_0F,
                     bool rex_w = false, bool vector256 = false);
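Two things change in these helpers: nds may now legitimately be xnoreg (an invalid register, used by instructions that have no second source operand), which is mapped to encoding 0 instead of asserting inside encoding(); and the register-register overload grows an opc parameter so callers can reach the 0F 38 / 0F 3A VEX opcode maps. A minimal sketch of a caller that needs both, modeled on the vinsertf128h declared further down in this change (the body is an assumption for illustration; the real emitter lives in assembler_x86.cpp and is not part of this webrev):

    // vinsertf128 is VEX.NDS.256.66.0F3A.W0 18 /r ib: the new 'opc'
    // argument selects the 0F 3A map, vector256 sets VEX.L = 1.
    void Assembler::vinsertf128h(XMMRegister dst, XMMRegister nds, XMMRegister src) {
      assert(VM_Version::supports_avx(), "requires AVX");
      bool vector256 = true;
      int encode = vex_prefix_and_encode(dst, nds, src, VEX_SIMD_66,
                                         vector256, VEX_OPCODE_0F_3A);
      emit_byte(0x18);
      emit_byte(0xC0 | encode);  // ModRM: register-direct form
      emit_byte(0x01);           // imm8: insert into the upper 128 bits
    }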
*** 1259,1268 ****
--- 1263,1273 ----
    void movb(Register dst, Address src);
  
    void movdl(XMMRegister dst, Register src);
    void movdl(Register dst, XMMRegister src);
    void movdl(XMMRegister dst, Address src);
+   void movdl(Address dst, XMMRegister src);
  
    // Move Double Quadword
    void movdq(XMMRegister dst, Register src);
    void movdq(Register dst, XMMRegister src);
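The new movdl(Address, XMMRegister) overload is the store form (movd r/m32, xmm, encoded 66 0F 7E /r). A sketch of what the matching emitter in assembler_x86.cpp presumably looks like (assumed body, not shown in this webrev):

    void Assembler::movdl(Address dst, XMMRegister src) {
      NOT_LP64(assert(VM_Version::supports_sse2(), ""));  // SSE2 guard on 32-bit
      InstructionMark im(this);
      simd_prefix(dst, src, VEX_SIMD_66);  // 66 prefix, or VEX when AVX is on
      emit_byte(0x7E);                     // opcode: movd r/m32, xmm
      emit_operand(src, dst);              // ModRM/SIB for the memory operand
    }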
*** 1272,1281 ****
--- 1277,1294 ----
    // Move Unaligned Double Quadword
    void movdqu(Address dst, XMMRegister src);
    void movdqu(XMMRegister dst, Address src);
    void movdqu(XMMRegister dst, XMMRegister src);
  
+   // Move Unaligned 256bit Vector
+   void vmovdqu(Address dst, XMMRegister src);
+   void vmovdqu(XMMRegister dst, Address src);
+   void vmovdqu(XMMRegister dst, XMMRegister src);
+ 
+   // Move lower 64bit to high 64bit in 128bit register
+   void movlhps(XMMRegister dst, XMMRegister src);
+ 
    void movl(Register dst, int32_t imm32);
    void movl(Address dst, int32_t imm32);
    void movl(Register dst, Register src);
    void movl(Register dst, Address src);
    void movl(Address dst, Register src);
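vmovdqu is the 256-bit unaligned load/store (VEX.256.F3.0F.WIG 6F/7F), and movlhps (NP 0F 16 /r) copies the low 64 bits of the source into the high 64 bits of the destination. A sketch of the load emitter under the new helpers, passing xnoreg since there is no NDS operand (assumed body, not part of this webrev):

    void Assembler::vmovdqu(XMMRegister dst, Address src) {
      assert(UseAVX > 0, "requires AVX");   // assumed guard
      InstructionMark im(this);
      bool vector256 = true;                // VEX.L = 1: operate on ymm, not xmm
      vex_prefix(dst, xnoreg, src, VEX_SIMD_F3, vector256);
      emit_byte(0x6F);                      // (v)movdqu load opcode
      emit_operand(dst, src);
    }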
*** 1613,1623 ****
--- 1626,1647 ----
    void vsubss(XMMRegister dst, XMMRegister nds, Address src);
    void vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src);
  
    void vxorpd(XMMRegister dst, XMMRegister nds, Address src);
    void vxorps(XMMRegister dst, XMMRegister nds, Address src);
  
+   // AVX Vector instructions.
+   void vxorpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
+   void vxorps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
+   void vinsertf128h(XMMRegister dst, XMMRegister nds, XMMRegister src);
+ 
+   // AVX instruction which is used to clear upper 128 bits of YMM registers and
+   // to avoid transition penalty between AVX and SSE states. There is no
+   // penalty if legacy SSE instructions are encoded using VEX prefix because
+   // they always clear upper 128 bits. It should be used before calling
+   // runtime code and native libraries.
+   void vzeroupper();
  
   protected:
    // Next instructions require address alignment 16 bytes SSE mode.
    // They should be called only from corresponding MacroAssembler instructions.
    void andpd(XMMRegister dst, Address src);
    void andps(XMMRegister dst, Address src);
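VZEROUPPER itself encodes as VEX.128.0F.WIG 77 (two-byte VEX prefix plus opcode: C5 F8 77). Presumably the emitter just routes an all-default VEX prefix through the existing helper, along these lines (assumed body, not shown in this webrev):

    void Assembler::vzeroupper() {
      assert(VM_Version::supports_avx(), "requires AVX");
      // All-zero register fields with VEX.L = 0 yield the C5 F8 prefix bytes.
      (void)vex_prefix_and_encode(xmm0, xmm0, xmm0, VEX_SIMD_NONE);
      emit_byte(0x77);
    }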
*** 2525,2537 ****
--- 2549,2565 ----
    void vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vsubss(dst, nds, src); }
    void vsubss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vsubss(dst, nds, src); }
    void vsubss(XMMRegister dst, XMMRegister nds, AddressLiteral src);
  
+   // AVX Vector instructions
+ 
+   void vxorpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) { Assembler::vxorpd(dst, nds, src, vector256); }
    void vxorpd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vxorpd(dst, nds, src); }
    void vxorpd(XMMRegister dst, XMMRegister nds, AddressLiteral src);
  
+   void vxorps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) { Assembler::vxorps(dst, nds, src, vector256); }
    void vxorps(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vxorps(dst, nds, src); }
    void vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src);
  
    // Data
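These MacroAssembler overloads simply forward to the Assembler versions; only the AddressLiteral variants need extra handling (materializing the address, typically through a scratch register), which is why they stay out-of-line. A hypothetical use in generated stub code (illustrative only, not from this webrev):

    __ vxorps(xmm0, xmm0, xmm0, true);   // zero all 256 bits of ymm0
    __ vxorpd(xmm1, xmm1, xmm1, false);  // 128-bit form: zero xmm1 only
    __ vzeroupper();                     // clear upper YMM halves before
                                         // calling out to native/SSE code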
