
src/cpu/x86/vm/assembler_x86.hpp

@@ -2139,10 +2139,15 @@
   // penalty if legacy SSE instructions are encoded using VEX prefix because
   // they always clear upper 128 bits. It should be used before calling
   // runtime code and native libraries.
   void vzeroupper();
 
+  // AVX support for vectorized conditional move (double). The following two instructions are only ever used together, as a coupled pair.
+  void cmppd(XMMRegister dst, XMMRegister nds, XMMRegister src, int cop, int vector_len);
+  void vpblendd(XMMRegister dst, XMMRegister nds, XMMRegister src1, XMMRegister src2, int vector_len);
+
+
  protected:
   // Next instructions require address alignment 16 bytes SSE mode.
   // They should be called only from corresponding MacroAssembler instructions.
   void andpd(XMMRegister dst, Address src);
   void andps(XMMRegister dst, Address src);
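
The two added declarations are meant to be emitted back-to-back: cmppd writes an
all-ones/all-zeros mask into each 64-bit lane of dst, and vpblendd then selects
each lane from one of the two sources according to that mask. A minimal,
hypothetical sketch of a call site, assuming the signatures declared above and
assuming the final XMMRegister operand of vpblendd carries the mask (the register
choices and the predicate value cond are illustrative, not part of this change):

    // dst = (src1 <cond> src2) ? src2 : src1, per double lane
    __ cmppd(xmm0, xmm1, xmm2, cond, vector_len);     // xmm0 := per-lane compare mask
    __ vpblendd(xmm0, xmm1, xmm2, xmm0, vector_len);  // blend xmm1/xmm2 lanes by mask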