src/cpu/x86/vm/assembler_x86.hpp (webrev sdiff, 7088419)

Old version:

1249 
1250   // Moves
1251 
1252   void mov64(Register dst, int64_t imm64);
1253 
1254   void movb(Address dst, Register src);
1255   void movb(Address dst, int imm8);
1256   void movb(Register dst, Address src);
1257 
1258   void movdl(XMMRegister dst, Register src);
1259   void movdl(Register dst, XMMRegister src);
1260   void movdl(XMMRegister dst, Address src);
1261   void movdl(Address dst, XMMRegister src);
1262 
1263   // Move Double Quadword
1264   void movdq(XMMRegister dst, Register src);
1265   void movdq(Register dst, XMMRegister src);
1266 
1267   // Move Aligned Double Quadword
1268   void movdqa(XMMRegister dst, XMMRegister src);

1269 
1270   // Move Unaligned Double Quadword
1271   void movdqu(Address     dst, XMMRegister src);
1272   void movdqu(XMMRegister dst, Address src);
1273   void movdqu(XMMRegister dst, XMMRegister src);
1274 
1275   // Move Unaligned 256bit Vector
1276   void vmovdqu(Address dst, XMMRegister src);
1277   void vmovdqu(XMMRegister dst, Address src);
1278   void vmovdqu(XMMRegister dst, XMMRegister src);
1279 
1280   // Move lower 64bit to high 64bit in 128bit register
1281   void movlhps(XMMRegister dst, XMMRegister src);
1282 
1283   void movl(Register dst, int32_t imm32);
1284   void movl(Address dst, int32_t imm32);
1285   void movl(Register dst, Register src);
1286   void movl(Register dst, Address src);
1287   void movl(Address dst, Register src);
1288 


1387   void orl(Register dst, Address src);
1388   void orl(Register dst, Register src);
1389 
1390   void orq(Address dst, int32_t imm32);
1391   void orq(Register dst, int32_t imm32);
1392   void orq(Register dst, Address src);
1393   void orq(Register dst, Register src);
1394 
1395   // Pack with unsigned saturation
1396   void packuswb(XMMRegister dst, XMMRegister src);
1397   void packuswb(XMMRegister dst, Address src);
1398   void vpackuswb(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
1399 
1400   // Permutation of 64bit words
1401   void vpermq(XMMRegister dst, XMMRegister src, int imm8, bool vector256);
1402 
1403   // SSE4.2 string instructions
1404   void pcmpestri(XMMRegister xmm1, XMMRegister xmm2, int imm8);
1405   void pcmpestri(XMMRegister xmm1, Address src, int imm8);
1406 








1407   // SSE4.1 packed move
1408   void pmovzxbw(XMMRegister dst, XMMRegister src);
1409   void pmovzxbw(XMMRegister dst, Address src);
1410 
1411 #ifndef _LP64 // no 32bit push/pop on amd64
1412   void popl(Address dst);
1413 #endif
1414 
1415 #ifdef _LP64
1416   void popq(Address dst);
1417 #endif
1418 
1419   void popcntl(Register dst, Address src);
1420   void popcntl(Register dst, Register src);
1421 
1422 #ifdef _LP64
1423   void popcntq(Register dst, Address src);
1424   void popcntq(Register dst, Register src);
1425 #endif
1426 


1747   void vpor(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
1748 
1749   // Xor packed integers
1750   void pxor(XMMRegister dst, XMMRegister src);
1751   void vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
1752   void vpxor(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
1753 
1754   // Copy low 128bit into high 128bit of YMM registers.
1755   void vinsertf128h(XMMRegister dst, XMMRegister nds, XMMRegister src);
1756   void vinserti128h(XMMRegister dst, XMMRegister nds, XMMRegister src);
1757 
1758   // Load/store high 128bit of YMM registers without destroying the other half.
1759   void vinsertf128h(XMMRegister dst, Address src);
1760   void vinserti128h(XMMRegister dst, Address src);
1761   void vextractf128h(Address dst, XMMRegister src);
1762   void vextracti128h(Address dst, XMMRegister src);
1763 
1764   // duplicate 4-byte integer data from src into 8 locations in dest
1765   void vpbroadcastd(XMMRegister dst, XMMRegister src);
1766 



1767   // AVX instruction which is used to clear upper 128 bits of YMM registers and
1768   // to avoid transition penalty between AVX and SSE states. There is no
1769   // penalty if legacy SSE instructions are encoded using VEX prefix because
1770   // they always clear upper 128 bits. It should be used before calling
1771   // runtime code and native libraries.
1772   void vzeroupper();
1773 
1774  protected:
1775   // The following instructions require 16-byte address alignment in SSE mode.
1776   // They should be called only from corresponding MacroAssembler instructions.
1777   void andpd(XMMRegister dst, Address src);
1778   void andps(XMMRegister dst, Address src);
1779   void xorpd(XMMRegister dst, Address src);
1780   void xorps(XMMRegister dst, Address src);
1781 
1782 };
1783 
1784 #endif // CPU_X86_VM_ASSEMBLER_X86_HPP

New version:

1249 
1250   // Moves
1251 
1252   void mov64(Register dst, int64_t imm64);
1253 
1254   void movb(Address dst, Register src);
1255   void movb(Address dst, int imm8);
1256   void movb(Register dst, Address src);
1257 
1258   void movdl(XMMRegister dst, Register src);
1259   void movdl(Register dst, XMMRegister src);
1260   void movdl(XMMRegister dst, Address src);
1261   void movdl(Address dst, XMMRegister src);
1262 
1263   // Move Double Quadword
1264   void movdq(XMMRegister dst, Register src);
1265   void movdq(Register dst, XMMRegister src);
1266 
1267   // Move Aligned Double Quadword
1268   void movdqa(XMMRegister dst, XMMRegister src);
1269   void movdqa(XMMRegister dst, Address src);
1270 
1271   // Move Unaligned Double Quadword
1272   void movdqu(Address     dst, XMMRegister src);
1273   void movdqu(XMMRegister dst, Address src);
1274   void movdqu(XMMRegister dst, XMMRegister src);
1275 
1276   // Move Unaligned 256bit Vector
1277   void vmovdqu(Address dst, XMMRegister src);
1278   void vmovdqu(XMMRegister dst, Address src);
1279   void vmovdqu(XMMRegister dst, XMMRegister src);
1280 
1281   // Move lower 64bit to high 64bit in 128bit register
1282   void movlhps(XMMRegister dst, XMMRegister src);
1283 
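
As an editor's aside (not part of the webrev), the effect of movlhps can be written as a scalar sketch, treating the 128-bit register as two 64-bit lanes; the _model name is illustrative only:

    #include <stdint.h>

    // Illustrative model of movlhps(dst, src): the low 64 bits of src are copied
    // into the high 64 bits of dst; the low 64 bits of dst are left unchanged.
    static void movlhps_model(uint64_t dst[2], const uint64_t src[2]) {
      dst[1] = src[0];
    }
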
1284   void movl(Register dst, int32_t imm32);
1285   void movl(Address dst, int32_t imm32);
1286   void movl(Register dst, Register src);
1287   void movl(Register dst, Address src);
1288   void movl(Address dst, Register src);
1289 


1388   void orl(Register dst, Address src);
1389   void orl(Register dst, Register src);
1390 
1391   void orq(Address dst, int32_t imm32);
1392   void orq(Register dst, int32_t imm32);
1393   void orq(Register dst, Address src);
1394   void orq(Register dst, Register src);
1395 
1396   // Pack with unsigned saturation
1397   void packuswb(XMMRegister dst, XMMRegister src);
1398   void packuswb(XMMRegister dst, Address src);
1399   void vpackuswb(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
1400 
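
For readers unfamiliar with "unsigned saturation", here is a rough scalar model of packuswb (names ending in _model are illustrative, not HotSpot code): each signed 16-bit input is clamped to [0, 255] before being packed into a byte lane.

    #include <stdint.h>

    static uint8_t sat_u8(int16_t v) { return v < 0 ? 0 : (v > 255 ? 255 : (uint8_t)v); }

    // Illustrative model of packuswb(dst, src): the 8 signed words already in dst (a)
    // and the 8 signed words of src (b) are clamped and packed into 16 byte lanes.
    static void packuswb_model(uint8_t dst[16], const int16_t a[8], const int16_t b[8]) {
      for (int i = 0; i < 8; i++) { dst[i] = sat_u8(a[i]); dst[8 + i] = sat_u8(b[i]); }
    }
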
1401   // Permutation of 64bit words
1402   void vpermq(XMMRegister dst, XMMRegister src, int imm8, bool vector256);
1403 
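
A hedged scalar sketch of vpermq in its 256-bit form: each of the four 64-bit destination lanes is selected from the source by a 2-bit field of imm8 (lane i uses bits 2i+1:2i). The _model name is illustrative only:

    #include <stdint.h>

    // Illustrative model of vpermq(dst, src, imm8) over four 64-bit lanes.
    static void vpermq_model(uint64_t dst[4], const uint64_t src[4], int imm8) {
      for (int i = 0; i < 4; i++) dst[i] = src[(imm8 >> (2 * i)) & 3];
    }
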
1404   // SSE4.2 string instructions
1405   void pcmpestri(XMMRegister xmm1, XMMRegister xmm2, int imm8);
1406   void pcmpestri(XMMRegister xmm1, Address src, int imm8);
1407 
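
The same instruction is exposed to C/C++ code as the _mm_cmpestri intrinsic; a minimal sketch of its "equal any" mode, searching a 16-byte block for one character (the function name and usage are illustrative, not HotSpot code):

    #include <nmmintrin.h>   // SSE4.2 intrinsics

    // Returns the index (0..15) of the first byte in the 16-byte block equal to c,
    // or 16 if no match is found within hay_len valid bytes.
    static int first_match(const char* block16, int hay_len, char c) {
      __m128i hay    = _mm_loadu_si128(reinterpret_cast<const __m128i*>(block16));
      __m128i needle = _mm_cvtsi32_si128((unsigned char)c);
      return _mm_cmpestri(needle, 1, hay, hay_len,
                          _SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ANY | _SIDD_LEAST_SIGNIFICANT);
    }
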
1408   // SSE 4.1 extract
1409   void pextrd(Register dst, XMMRegister src, int imm8);
1410   void pextrq(Register dst, XMMRegister src, int imm8);
1411 
1412   // SSE 4.1 insert
1413   void pinsrd(XMMRegister dst, Register src, int imm8);
1414   void pinsrq(XMMRegister dst, Register src, int imm8);
1415 
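
Scalar sketch of the dword forms (pextrd/pinsrd); the qword forms behave the same over two 64-bit lanes. Names are illustrative only:

    #include <stdint.h>

    // pextrd: read dword lane imm8 of an XMM register into a general-purpose register.
    static uint32_t pextrd_model(const uint32_t xmm[4], int imm8) { return xmm[imm8 & 3]; }

    // pinsrd: write a general-purpose register into dword lane imm8; other lanes unchanged.
    static void pinsrd_model(uint32_t xmm[4], uint32_t gpr, int imm8) { xmm[imm8 & 3] = gpr; }
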
1416   // SSE4.1 packed move
1417   void pmovzxbw(XMMRegister dst, XMMRegister src);
1418   void pmovzxbw(XMMRegister dst, Address src);
1419 
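
Scalar sketch of pmovzxbw, illustrative only: the low 8 bytes of the source are zero-extended into the 8 word (16-bit) lanes of the destination.

    #include <stdint.h>

    static void pmovzxbw_model(uint16_t dst[8], const uint8_t src[8]) {
      for (int i = 0; i < 8; i++) dst[i] = src[i];   // zero-extend each byte to 16 bits
    }
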
1420 #ifndef _LP64 // no 32bit push/pop on amd64
1421   void popl(Address dst);
1422 #endif
1423 
1424 #ifdef _LP64
1425   void popq(Address dst);
1426 #endif
1427 
1428   void popcntl(Register dst, Address src);
1429   void popcntl(Register dst, Register src);
1430 
1431 #ifdef _LP64
1432   void popcntq(Register dst, Address src);
1433   void popcntq(Register dst, Register src);
1434 #endif
1435 
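
popcntl/popcntq compute the number of set bits in the source operand; a scalar equivalent (illustrative only):

    #include <stdint.h>

    static int popcnt_model(uint64_t x) {
      int n = 0;
      while (x) { x &= x - 1; n++; }   // clear the lowest set bit on each iteration
      return n;
    }
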


1756   void vpor(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
1757 
1758   // Xor packed integers
1759   void pxor(XMMRegister dst, XMMRegister src);
1760   void vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
1761   void vpxor(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
1762 
1763   // Copy low 128bit into high 128bit of YMM registers.
1764   void vinsertf128h(XMMRegister dst, XMMRegister nds, XMMRegister src);
1765   void vinserti128h(XMMRegister dst, XMMRegister nds, XMMRegister src);
1766 
1767   // Load/store high 128bit of YMM registers without destroying the other half.
1768   void vinsertf128h(XMMRegister dst, Address src);
1769   void vinserti128h(XMMRegister dst, Address src);
1770   void vextractf128h(Address dst, XMMRegister src);
1771   void vextracti128h(Address dst, XMMRegister src);
1772 
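
The "128h" suffix is HotSpot shorthand for vinsertf128/vextractf128 with an immediate of 1, i.e. these helpers address only the high 128-bit half of a YMM register. A hedged scalar sketch of the register form, treating a YMM register as four 64-bit lanes (the _model name is illustrative):

    #include <stdint.h>
    #include <string.h>

    // vinsertf128h(dst, nds, src): bits 127:0 come from nds, bits 255:128 from the
    // low 128 bits of src. vextractf128h stores only bits 255:128.
    static void vinsert128h_model(uint64_t dst[4], const uint64_t nds[4], const uint64_t src[4]) {
      memcpy(dst,     nds, 16);   // low half from nds
      memcpy(dst + 2, src, 16);   // high half from the low half of src
    }
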
1773   // duplicate 4-byte integer data from src into 8 locations in dest
1774   void vpbroadcastd(XMMRegister dst, XMMRegister src);
1775 
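
Scalar sketch of the comment above, illustrative only: the low 32-bit element of the source is replicated into all eight dword lanes of a 256-bit destination.

    #include <stdint.h>

    static void vpbroadcastd_model(uint32_t dst[8], uint32_t src_lane0) {
      for (int i = 0; i < 8; i++) dst[i] = src_lane0;
    }
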
1776   // Carry-Less Multiplication Quadword
1777   void vpclmulqdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int mask);
1778 
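
Carry-less multiplication is an ordinary 64x64 -> 128-bit multiply in which the partial products are combined with XOR instead of addition; the imm8 mask selects which 64-bit halves of the two source registers are multiplied. A scalar sketch of one such multiply, illustrative only:

    #include <stdint.h>

    static void clmul64_model(uint64_t a, uint64_t b, uint64_t* lo, uint64_t* hi) {
      uint64_t rlo = 0, rhi = 0;
      for (int i = 0; i < 64; i++) {
        if ((b >> i) & 1) {                  // XOR in the shifted partial product
          rlo ^= a << i;
          if (i != 0) rhi ^= a >> (64 - i);  // bits shifted past bit 63 land in the high word
        }
      }
      *lo = rlo;
      *hi = rhi;
    }
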
1779   // AVX instruction which is used to clear upper 128 bits of YMM registers and
1780   // to avoid transition penalty between AVX and SSE states. There is no
1781   // penalty if legacy SSE instructions are encoded using VEX prefix because
1782   // they always clear upper 128 bits. It should be used before calling
1783   // runtime code and native libraries.
1784   void vzeroupper();
1785 
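
A hedged sketch of the usage pattern the comment describes, in the style of HotSpot's MacroAssembler-based stub code; the call target and surrounding generator are placeholders, not part of this change:

    // Assumes the usual '#define __ masm->' convention of HotSpot stub generators.
    if (UseAVX > 0) {
      __ vzeroupper();   // clear bits 255:128 of all YMM registers before leaving AVX code
    }
    // ...then transfer control to runtime code or a native library, e.g.:
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, some_native_entry)));  // placeholder target
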
1786  protected:
1787   // The following instructions require 16-byte address alignment in SSE mode.
1788   // They should be called only from corresponding MacroAssembler instructions.
1789   void andpd(XMMRegister dst, Address src);
1790   void andps(XMMRegister dst, Address src);
1791   void xorpd(XMMRegister dst, Address src);
1792   void xorps(XMMRegister dst, Address src);
1793 
1794 };
1795 
1796 #endif // CPU_X86_VM_ASSEMBLER_X86_HPP