
src/cpu/x86/vm/assembler_x86.hpp

1314         }
1315 
1316         lock();
1317         addl(Address(rsp, offset), 0);// Assert the lock# signal here
1318       }
1319     }
1320   }
1321 
1322   void mfence();
1323 
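The lock(); addl(Address(rsp, offset), 0) pair above is the classic x86 barrier idiom: a lock-prefixed read-modify-write of a harmless stack location orders all earlier loads and stores, and is typically cheaper than an explicit MFENCE. A minimal sketch of the same idiom outside HotSpot, assuming GCC/Clang inline assembly on x86-64 (function names are illustrative, not HotSpot's; the assembler code above biases the address by an offset from rsp computed just before the lines shown, while the sketch uses plain (%rsp) for simplicity):

#include <emmintrin.h>   // _mm_mfence, shown for comparison

// Full fence via a locked no-op add, the idiom emitted above.
static inline void fence_lock_add() {
  __asm__ __volatile__("lock; addl $0,(%%rsp)" ::: "memory", "cc");
}

// The explicit SSE2 fence corresponding to the mfence() declaration above.
static inline void fence_mfence() {
  _mm_mfence();
}
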
1324   // Moves
1325 
1326   void mov64(Register dst, int64_t imm64);
1327 
1328   void movb(Address dst, Register src);
1329   void movb(Address dst, int imm8);
1330   void movb(Register dst, Address src);
1331 
1332   void movddup(XMMRegister dst, XMMRegister src);
1333 


1334   void kmovql(KRegister dst, KRegister src);
1335   void kmovql(KRegister dst, Register src);
1336   void kmovdl(KRegister dst, Register src);
1337   void kmovwl(KRegister dst, Register src);
1338   void kmovql(Address dst, KRegister src);
1339   void kmovql(KRegister dst, Address src);
1340 
1341   void movdl(XMMRegister dst, Register src);
1342   void movdl(Register dst, XMMRegister src);
1343   void movdl(XMMRegister dst, Address src);
1344   void movdl(Address dst, XMMRegister src);
1345 
1346   // Move Double Quadword
1347   void movdq(XMMRegister dst, Register src);
1348   void movdq(Register dst, XMMRegister src);
1349 
1350   // Move Aligned Double Quadword
1351   void movdqa(XMMRegister dst, XMMRegister src);
1352   void movdqa(XMMRegister dst, Address src);
1353 
1354   // Move Unaligned Double Quadword
1355   void movdqu(Address     dst, XMMRegister src);
1356   void movdqu(XMMRegister dst, Address src);
1357   void movdqu(XMMRegister dst, XMMRegister src);
1358 
1359   // Move Unaligned 256bit Vector
1360   void vmovdqu(Address dst, XMMRegister src);
1361   void vmovdqu(XMMRegister dst, Address src);
1362   void vmovdqu(XMMRegister dst, XMMRegister src);
1363 
1364    // Move Unaligned 512bit Vector
1365   void evmovdqul(Address dst, XMMRegister src, int vector_len);
1366   void evmovdqul(XMMRegister dst, Address src, int vector_len);
1367   void evmovdqul(XMMRegister dst, XMMRegister src, int vector_len);
1368   void evmovdquq(Address dst, XMMRegister src, int vector_len);
1369   void evmovdquq(XMMRegister dst, Address src, int vector_len);
1370   void evmovdquq(XMMRegister dst, XMMRegister src, int vector_len);
1371 
1372   // Move lower 64bit to high 64bit in 128bit register
1373   void movlhps(XMMRegister dst, XMMRegister src);
1374 
1375   void movl(Register dst, int32_t imm32);
1376   void movl(Address dst, int32_t imm32);
1377   void movl(Register dst, Register src);
1378   void movl(Register dst, Address src);
1379   void movl(Address dst, Register src);
1380 
1381   // These dummies prevent movl from converting a zero (like NULL) into a Register
1382   // by giving the compiler two choices it can't resolve
1383 
1384   void movl(Address  dst, void* junk);
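A short illustration of the dummy-overload trick described in the comment above, using hypothetical stand-in types (Register in HotSpot is a pointer-like typedef, so a null pointer constant would otherwise convert to it silently). This is a sketch of the overload-resolution behaviour, not HotSpot code:

#include <cstdint>
#include <cstddef>

struct RegisterImpl; typedef RegisterImpl* Register;  // pointer-like, as in HotSpot
struct Address {};

struct Asm {
  void movl(Address dst, int32_t imm32) {}
  void movl(Address dst, Register src)  {}
  void movl(Address dst, void* junk);      // dummy: declared only, never defined
};

void demo(Asm& a, Address dst, Register src) {
  a.movl(dst, 42);       // fine: int32_t overload
  a.movl(dst, src);      // fine: Register overload
  // a.movl(dst, NULL);  // rejected: with NULL expanding to 0L (or __null) the
  //                     // argument converts equally well to int32_t, Register
  //                     // and void*, so the call is ambiguous instead of the
  //                     // zero silently becoming a Register
}
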


1490   void orq(Address dst, int32_t imm32);
1491   void orq(Register dst, int32_t imm32);
1492   void orq(Register dst, Address src);
1493   void orq(Register dst, Register src);
1494 
1495   // Pack with unsigned saturation
1496   void packuswb(XMMRegister dst, XMMRegister src);
1497   void packuswb(XMMRegister dst, Address src);
1498   void vpackuswb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1499 
1500   // Permutation of 64bit words
1501   void vpermq(XMMRegister dst, XMMRegister src, int imm8, int vector_len);
1502   void vpermq(XMMRegister dst, XMMRegister src, int imm8);
1503 
1504   void pause();
1505 
1506   // SSE4.2 string instructions
1507   void pcmpestri(XMMRegister xmm1, XMMRegister xmm2, int imm8);
1508   void pcmpestri(XMMRegister xmm1, Address src, int imm8);
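pcmpestri is the explicit-length form of the SSE4.2 packed string compare: the operand lengths travel in registers (rax/rdx) rather than being implied by a terminator, and the resulting index lands in rcx. A rough equivalent through compiler intrinsics, assuming SSE4.2 and that both buffers have at least 16 readable bytes (names are illustrative, not HotSpot's):

#include <nmmintrin.h>   // SSE4.2 intrinsics

// Index of the first occurrence of the needle's first needle_len bytes within
// a 16-byte haystack chunk, or 16 when there is no match in this chunk.
int find_in_chunk(const char* haystack16, const char* needle, int needle_len) {
  __m128i hay = _mm_loadu_si128(reinterpret_cast<const __m128i*>(haystack16));
  __m128i ndl = _mm_loadu_si128(reinterpret_cast<const __m128i*>(needle));
  return _mm_cmpestri(ndl, needle_len, hay, 16,
                      _SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ORDERED);
}
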
1509 
1510   void pcmpeqw(XMMRegister dst, XMMRegister src);
1511   void vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1512 
1513   void pmovmskb(Register dst, XMMRegister src);
1514   void vpmovmskb(Register dst, XMMRegister src);
1515 
1516   // SSE 4.1 extract
1517   void pextrd(Register dst, XMMRegister src, int imm8);
1518   void pextrq(Register dst, XMMRegister src, int imm8);
1519   // SSE 2 extract
1520   void pextrw(Register dst, XMMRegister src, int imm8);
1521 
1522   // SSE 4.1 insert
1523   void pinsrd(XMMRegister dst, Register src, int imm8);
1524   void pinsrq(XMMRegister dst, Register src, int imm8);
1525   // SSE 2 insert
1526   void pinsrw(XMMRegister dst, Register src, int imm8);
1527 
1528   // SSE4.1 packed move
1529   void pmovzxbw(XMMRegister dst, XMMRegister src);
1530   void pmovzxbw(XMMRegister dst, Address src);
1531 
1532   void vpmovzxbw(XMMRegister dst, Address src);
1533 
1534 #ifndef _LP64 // no 32bit push/pop on amd64
1535   void popl(Address dst);
1536 #endif
1537 
1538 #ifdef _LP64
1539   void popq(Address dst);
1540 #endif
1541 
1542   void popcntl(Register dst, Address src);
1543   void popcntl(Register dst, Register src);
1544 
1545 #ifdef _LP64
1546   void popcntq(Register dst, Address src);
1547   void popcntq(Register dst, Register src);
1548 #endif
1549 
1550   // Prefetches (SSE, SSE2, 3DNOW only)
1551 
1552   void prefetchnta(Address src);




1314         }
1315 
1316         lock();
1317         addl(Address(rsp, offset), 0);// Assert the lock# signal here
1318       }
1319     }
1320   }
1321 
1322   void mfence();
1323 
1324   // Moves
1325 
1326   void mov64(Register dst, int64_t imm64);
1327 
1328   void movb(Address dst, Register src);
1329   void movb(Address dst, int imm8);
1330   void movb(Register dst, Address src);
1331 
1332   void movddup(XMMRegister dst, XMMRegister src);
1333 
1334   void kmovwl(KRegister dst, Register src);
1335   void kmovdl(KRegister dst, Register src);
1336   void kmovql(KRegister dst, KRegister src);
1337   void kmovql(KRegister dst, Register src);


1338   void kmovql(Address dst, KRegister src);
1339   void kmovql(KRegister dst, Address src);
1340 
1341   void kortestbl(KRegister dst, KRegister src);
1342   void kortestwl(KRegister dst, KRegister src);
1343   void kortestdl(KRegister dst, KRegister src);
1344   void kortestql(KRegister dst, KRegister src);
1345 
1346   void movdl(XMMRegister dst, Register src);
1347   void movdl(Register dst, XMMRegister src);
1348   void movdl(XMMRegister dst, Address src);
1349   void movdl(Address dst, XMMRegister src);
1350 
1351   // Move Double Quadword
1352   void movdq(XMMRegister dst, Register src);
1353   void movdq(Register dst, XMMRegister src);
1354 
1355   // Move Aligned Double Quadword
1356   void movdqa(XMMRegister dst, XMMRegister src);
1357   void movdqa(XMMRegister dst, Address src);
1358 
1359   // Move Unaligned Double Quadword
1360   void movdqu(Address     dst, XMMRegister src);
1361   void movdqu(XMMRegister dst, Address src);
1362   void movdqu(XMMRegister dst, XMMRegister src);
1363 
1364   // Move Unaligned 256bit Vector
1365   void vmovdqu(Address dst, XMMRegister src);
1366   void vmovdqu(XMMRegister dst, Address src);
1367   void vmovdqu(XMMRegister dst, XMMRegister src);
1368 
1369    // Move Unaligned 512bit Vector
1370   void evmovdqub(Address dst, XMMRegister src, int vector_len);
1371   void evmovdqub(XMMRegister dst, Address src, int vector_len);
1372   void evmovdqub(XMMRegister dst, XMMRegister src, int vector_len);
1373   void evmovdquw(Address dst, XMMRegister src, int vector_len);
1374   void evmovdquw(XMMRegister dst, Address src, int vector_len);
1375   void evmovdquw(XMMRegister dst, XMMRegister src, int vector_len);
1376   void evmovdqul(Address dst, XMMRegister src, int vector_len);
1377   void evmovdqul(XMMRegister dst, Address src, int vector_len);
1378   void evmovdqul(XMMRegister dst, XMMRegister src, int vector_len);
1379   void evmovdquq(Address dst, XMMRegister src, int vector_len);
1380   void evmovdquq(XMMRegister dst, Address src, int vector_len);
1381   void evmovdquq(XMMRegister dst, XMMRegister src, int vector_len);
1382 
1383   // Move lower 64bit to high 64bit in 128bit register
1384   void movlhps(XMMRegister dst, XMMRegister src);
1385 
1386   void movl(Register dst, int32_t imm32);
1387   void movl(Address dst, int32_t imm32);
1388   void movl(Register dst, Register src);
1389   void movl(Register dst, Address src);
1390   void movl(Address dst, Register src);
1391 
1392   // These dummies prevent movl from converting a zero (like NULL) into a Register
1393   // by giving the compiler two choices it can't resolve
1394 
1395   void movl(Address  dst, void* junk);


1501   void orq(Address dst, int32_t imm32);
1502   void orq(Register dst, int32_t imm32);
1503   void orq(Register dst, Address src);
1504   void orq(Register dst, Register src);
1505 
1506   // Pack with unsigned saturation
1507   void packuswb(XMMRegister dst, XMMRegister src);
1508   void packuswb(XMMRegister dst, Address src);
1509   void vpackuswb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1510 
1511   // Permutation of 64bit words
1512   void vpermq(XMMRegister dst, XMMRegister src, int imm8, int vector_len);
1513   void vpermq(XMMRegister dst, XMMRegister src, int imm8);
1514 
1515   void pause();
1516 
1517   // SSE4.2 string instructions
1518   void pcmpestri(XMMRegister xmm1, XMMRegister xmm2, int imm8);
1519   void pcmpestri(XMMRegister xmm1, Address src, int imm8);
1520 
1521   void pcmpeqb(XMMRegister dst, XMMRegister src);
1522   void vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1523   void evpcmpeqb(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len);
1524 
1525   void pcmpeqw(XMMRegister dst, XMMRegister src);
1526   void vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1527   void evpcmpeqw(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len);
1528 
1529   void pcmpeqd(XMMRegister dst, XMMRegister src);
1530   void vpcmpeqd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1531   void evpcmpeqd(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len);
1532 
1533   void pcmpeqq(XMMRegister dst, XMMRegister src);
1534   void vpcmpeqq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1535   void evpcmpeqq(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len);
1536   void evpcmpeqq(KRegister kdst, XMMRegister nds, Address src, int vector_len);
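Unlike pcmpeqb/vpcmpeqb, which leave a byte-wise result mask in a vector register, the EVEX-encoded evpcmpeq* forms added above write their result into an opmask (K) register, which is then moved or tested with the kmov*/kortest* instructions declared earlier. A small sketch of the same pattern through compiler intrinsics, assuming AVX-512BW (names are illustrative, not HotSpot code):

#include <immintrin.h>

// Compare two 64-byte chunks; the byte-equality result lands in a 64-bit
// opmask (__mmask64), one bit per byte, rather than in a vector register.
bool chunks_equal_64(const void* a, const void* b) {
  __m512i va = _mm512_loadu_si512(a);
  __m512i vb = _mm512_loadu_si512(b);
  __mmask64 eq = _mm512_cmpeq_epi8_mask(va, vb);   // evpcmpeqb k, zmm, zmm
  return eq == ~0ULL;                              // all 64 bytes equal
}
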
1537 
1538   void pmovmskb(Register dst, XMMRegister src);
1539   void vpmovmskb(Register dst, XMMRegister src);
1540 
1541   // SSE 4.1 extract
1542   void pextrd(Register dst, XMMRegister src, int imm8);
1543   void pextrq(Register dst, XMMRegister src, int imm8);
1544   // SSE 2 extract
1545   void pextrw(Register dst, XMMRegister src, int imm8);
1546 
1547   // SSE 4.1 insert
1548   void pinsrd(XMMRegister dst, Register src, int imm8);
1549   void pinsrq(XMMRegister dst, Register src, int imm8);
1550   // SSE 2 insert
1551   void pinsrw(XMMRegister dst, Register src, int imm8);
1552 
1553   // SSE4.1 packed move
1554   void pmovzxbw(XMMRegister dst, XMMRegister src);
1555   void pmovzxbw(XMMRegister dst, Address src);
1556 
1557   void vpmovzxbw(XMMRegister dst, Address src, int vector_len);
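pmovzxbw/vpmovzxbw zero-extend packed unsigned bytes into packed 16-bit words; the vpmovzxbw overload above takes a vector_len argument to select the width of the emitted encoding. A minimal intrinsic sketch of the 256-bit form, assuming AVX2 (names are illustrative, not HotSpot code):

#include <immintrin.h>

// Widen 16 unsigned bytes into 16 zero-extended 16-bit words
// (vpmovzxbw ymm, m128); the SSE4.1 pmovzxbw above widens the low 8 bytes.
__m256i widen_bytes_to_words(const void* p) {
  __m128i bytes = _mm_loadu_si128(static_cast<const __m128i*>(p));
  return _mm256_cvtepu8_epi16(bytes);
}
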
1558 
1559 #ifndef _LP64 // no 32bit push/pop on amd64
1560   void popl(Address dst);
1561 #endif
1562 
1563 #ifdef _LP64
1564   void popq(Address dst);
1565 #endif
1566 
1567   void popcntl(Register dst, Address src);
1568   void popcntl(Register dst, Register src);
1569 
1570 #ifdef _LP64
1571   void popcntq(Register dst, Address src);
1572   void popcntq(Register dst, Register src);
1573 #endif
1574 
1575   // Prefetches (SSE, SSE2, 3DNOW only)
1576 
1577   void prefetchnta(Address src);

