486 GS_segment = 0x65,
487
488 REX = 0x40,
489
490 REX_B = 0x41,
491 REX_X = 0x42,
492 REX_XB = 0x43,
493 REX_R = 0x44,
494 REX_RB = 0x45,
495 REX_RX = 0x46,
496 REX_RXB = 0x47,
497
498 REX_W = 0x48,
499
500 REX_WB = 0x49,
501 REX_WX = 0x4A,
502 REX_WXB = 0x4B,
503 REX_WR = 0x4C,
504 REX_WRB = 0x4D,
505 REX_WRX = 0x4E,
506 REX_WRXB = 0x4F
507 };
508
509 enum WhichOperand {
510 // input to locate_operand, and format code for relocations
511 imm_operand = 0, // embedded 32-bit|64-bit immediate operand
512 disp32_operand = 1, // embedded 32-bit displacement or address
513 call32_operand = 2, // embedded 32-bit self-relative displacement
// Narrow-oop operands exist only on 64-bit builds, so the limit differs per build.
514 #ifndef _LP64
515 _WhichOperand_limit = 3
516 #else
517 narrow_oop_operand = 3, // embedded 32-bit immediate narrow oop
518 _WhichOperand_limit = 4
519 #endif
520 };
521
522
523
524 // NOTE: The general philosophy of the declarations here is that 64bit versions
525 // of instructions are freely declared without the need for wrapping them in an ifdef.
526 // (Some dangerous instructions are ifdef'd out of inappropriate jvm's.)
527 // In the .cpp file the implementations are wrapped so that they are dropped out
528 // of the resulting jvm. This is done mostly to keep the footprint of KERNEL
529 // to the size it was prior to merging up the 32bit and 64bit assemblers.
530 //
531 // This does mean you'll get a linker/runtime error if you use a 64bit only instruction
532 // in a 32bit vm. This is somewhat unfortunate but keeps the ifdef noise down.
533
534 private:
535
536
537 // 64bit prefixes
538 int prefix_and_encode(int reg_enc, bool byteinst = false);
539 int prefixq_and_encode(int reg_enc);
540
541 int prefix_and_encode(int dst_enc, int src_enc, bool byteinst = false);
542 int prefixq_and_encode(int dst_enc, int src_enc);
543
544 void prefix(Register reg);
545 void prefix(Address adr);
546 void prefixq(Address adr);
547
548 void prefix(Address adr, Register reg, bool byteinst = false);
549 void prefixq(Address adr, Register reg);
550
551 void prefix(Address adr, XMMRegister reg);
552
553 void prefetch_prefix(Address src);
554
555 // Helper functions for groups of instructions
556 void emit_arith_b(int op1, int op2, Register dst, int imm8);
557
558 void emit_arith(int op1, int op2, Register dst, int32_t imm32);
559 // only 32bit??
560 void emit_arith(int op1, int op2, Register dst, jobject obj);
561 void emit_arith(int op1, int op2, Register dst, Register src);
562
563 void emit_operand(Register reg,
564 Register base, Register index, Address::ScaleFactor scale,
565 int disp,
566 RelocationHolder const& rspec,
567 int rip_relative_correction = 0);
568
569 void emit_operand(Register reg, Address adr, int rip_relative_correction = 0);
570
571 // operands that only take the original 32bit registers
572 void emit_operand32(Register reg, Address adr);
573
574 void emit_operand(XMMRegister reg,
747
748 void addq(Address dst, int32_t imm32);
749 void addq(Address dst, Register src);
750 void addq(Register dst, int32_t imm32);
751 void addq(Register dst, Address src);
752 void addq(Register dst, Register src);
753
754 void addr_nop_4();
755 void addr_nop_5();
756 void addr_nop_7();
757 void addr_nop_8();
758
759 // Add Scalar Double-Precision Floating-Point Values
760 void addsd(XMMRegister dst, Address src);
761 void addsd(XMMRegister dst, XMMRegister src);
762
763 // Add Scalar Single-Precision Floating-Point Values
764 void addss(XMMRegister dst, Address src);
765 void addss(XMMRegister dst, XMMRegister src);
766
767 void andl(Register dst, int32_t imm32);
768 void andl(Register dst, Address src);
769 void andl(Register dst, Register src);
770
771 void andq(Address dst, int32_t imm32);
772 void andq(Register dst, int32_t imm32);
773 void andq(Register dst, Address src);
774 void andq(Register dst, Register src);
775
776 // Bitwise Logical AND of Packed Double-Precision Floating-Point Values
777 void andpd(XMMRegister dst, Address src);
778 void andpd(XMMRegister dst, XMMRegister src);
779
780 void bsfl(Register dst, Register src);
781 void bsrl(Register dst, Register src);
782
783 #ifdef _LP64
784 void bsfq(Register dst, Register src);
785 void bsrq(Register dst, Register src);
786 #endif
787
788 void bswapl(Register reg);
789
790 void bswapq(Register reg);
791
792 void call(Label& L, relocInfo::relocType rtype);
793 void call(Register reg); // push pc; pc <- reg
794 void call(Address adr); // push pc; pc <- adr
795
796 void cdql();
797
798 void cdqq();
799
820 void cmpq(Address dst, Register src);
821
822 void cmpq(Register dst, int32_t imm32);
823 void cmpq(Register dst, Register src);
824 void cmpq(Register dst, Address src);
825
826 // these are dummies used to catch attempting to convert NULL to Register
827 void cmpl(Register dst, void* junk); // dummy
828 void cmpq(Register dst, void* junk); // dummy
829
830 void cmpw(Address dst, int imm16);
831
832 void cmpxchg8 (Address adr);
833
834 void cmpxchgl(Register reg, Address adr);
835
836 void cmpxchgq(Register reg, Address adr);
837
838 // Ordered Compare Scalar Double-Precision Floating-Point Values and set EFLAGS
839 void comisd(XMMRegister dst, Address src);
840
841 // Ordered Compare Scalar Single-Precision Floating-Point Values and set EFLAGS
842 void comiss(XMMRegister dst, Address src);
843
844 // Identify processor type and features
// Emits the raw two-byte CPUID opcode (0F A2) directly into the code buffer.
845 void cpuid() {
846 emit_byte(0x0F);
847 emit_byte(0xA2);
848 }
849
850 // Convert Scalar Double-Precision Floating-Point Value to Scalar Single-Precision Floating-Point Value
851 void cvtsd2ss(XMMRegister dst, XMMRegister src);
852
853 // Convert Doubleword Integer to Scalar Double-Precision Floating-Point Value
854 void cvtsi2sdl(XMMRegister dst, Register src);
855 void cvtsi2sdq(XMMRegister dst, Register src);
856
857 // Convert Doubleword Integer to Scalar Single-Precision Floating-Point Value
858 void cvtsi2ssl(XMMRegister dst, Register src);
859 void cvtsi2ssq(XMMRegister dst, Register src);
860
861 // Convert Packed Signed Doubleword Integers to Packed Double-Precision Floating-Point Value
862 void cvtdq2pd(XMMRegister dst, XMMRegister src);
863
864 // Convert Packed Signed Doubleword Integers to Packed Single-Precision Floating-Point Value
865 void cvtdq2ps(XMMRegister dst, XMMRegister src);
866
867 // Convert Scalar Single-Precision Floating-Point Value to Scalar Double-Precision Floating-Point Value
868 void cvtss2sd(XMMRegister dst, XMMRegister src);
869
870 // Convert with Truncation Scalar Double-Precision Floating-Point Value to Doubleword Integer
871 void cvttsd2sil(Register dst, Address src);
872 void cvttsd2sil(Register dst, XMMRegister src);
873 void cvttsd2siq(Register dst, XMMRegister src);
874
875 // Convert with Truncation Scalar Single-Precision Floating-Point Value to Doubleword Integer
876 void cvttss2sil(Register dst, XMMRegister src);
877 void cvttss2siq(Register dst, XMMRegister src);
878
879 // Divide Scalar Double-Precision Floating-Point Values
880 void divsd(XMMRegister dst, Address src);
881 void divsd(XMMRegister dst, XMMRegister src);
882
883 // Divide Scalar Single-Precision Floating-Point Values
884 void divss(XMMRegister dst, Address src);
885 void divss(XMMRegister dst, XMMRegister src);
886
887 void emms();
888
1123
1124 void mfence();
1125
1126 // Moves
1127
1128 void mov64(Register dst, int64_t imm64);
1129
1130 void movb(Address dst, Register src);
1131 void movb(Address dst, int imm8);
1132 void movb(Register dst, Address src);
1133
1134 void movdl(XMMRegister dst, Register src);
1135 void movdl(Register dst, XMMRegister src);
1136 void movdl(XMMRegister dst, Address src);
1137
1138 // Move Double Quadword
1139 void movdq(XMMRegister dst, Register src);
1140 void movdq(Register dst, XMMRegister src);
1141
1142 // Move Aligned Double Quadword
1143 void movdqa(Address dst, XMMRegister src);
1144 void movdqa(XMMRegister dst, Address src);
1145 void movdqa(XMMRegister dst, XMMRegister src);
1146
1147 // Move Unaligned Double Quadword
1148 void movdqu(Address dst, XMMRegister src);
1149 void movdqu(XMMRegister dst, Address src);
1150 void movdqu(XMMRegister dst, XMMRegister src);
1151
1152 void movl(Register dst, int32_t imm32);
1153 void movl(Address dst, int32_t imm32);
1154 void movl(Register dst, Register src);
1155 void movl(Register dst, Address src);
1156 void movl(Address dst, Register src);
1157
1158 // These dummies prevent using movl from converting a zero (like NULL) into Register
1159 // by giving the compiler two choices it can't resolve
1160
1161 void movl(Address dst, void* junk);
1162 void movl(Register dst, void* junk);
1163
1164 #ifdef _LP64
1244 #endif
1245
1246 void nop(int i = 1);
1247
1248 void notl(Register dst);
1249
1250 #ifdef _LP64
1251 void notq(Register dst);
1252 #endif
1253
1254 void orl(Address dst, int32_t imm32);
1255 void orl(Register dst, int32_t imm32);
1256 void orl(Register dst, Address src);
1257 void orl(Register dst, Register src);
1258
1259 void orq(Address dst, int32_t imm32);
1260 void orq(Register dst, int32_t imm32);
1261 void orq(Register dst, Address src);
1262 void orq(Register dst, Register src);
1263
1264 // SSE4.2 string instructions
1265 void pcmpestri(XMMRegister xmm1, XMMRegister xmm2, int imm8);
1266 void pcmpestri(XMMRegister xmm1, Address src, int imm8);
1267
1268 #ifndef _LP64 // no 32bit push/pop on amd64
1269 void popl(Address dst);
1270 #endif
1271
1272 #ifdef _LP64
1273 void popq(Address dst);
1274 #endif
1275
1276 void popcntl(Register dst, Address src);
1277 void popcntl(Register dst, Register src);
1278
1279 #ifdef _LP64
1280 void popcntq(Register dst, Address src);
1281 void popcntq(Register dst, Register src);
1282 #endif
1283
1284 // Prefetches (SSE, SSE2, 3DNOW only)
1285
1286 void prefetchnta(Address src);
1287 void prefetchr(Address src);
1288 void prefetcht0(Address src);
1289 void prefetcht1(Address src);
1290 void prefetcht2(Address src);
1291 void prefetchw(Address src);
1292
1293 // POR - Bitwise logical OR
1294 void por(XMMRegister dst, XMMRegister src);
1295
1296 // Shuffle Packed Doublewords
1297 void pshufd(XMMRegister dst, XMMRegister src, int mode);
1298 void pshufd(XMMRegister dst, Address src, int mode);
1299
1300 // Shuffle Packed Low Words
1301 void pshuflw(XMMRegister dst, XMMRegister src, int mode);
1302 void pshuflw(XMMRegister dst, Address src, int mode);
1303
1304 // Shift Right by bits Logical Quadword Immediate
1305 void psrlq(XMMRegister dst, int shift);
1306
1307 // Shift Right by bytes Logical DoubleQuadword Immediate
1308 void psrldq(XMMRegister dst, int shift);
1309
1310 // Logical Compare Double Quadword
1311 void ptest(XMMRegister dst, XMMRegister src);
1312 void ptest(XMMRegister dst, Address src);
1313
1314 // Interleave Low Bytes
1315 void punpcklbw(XMMRegister dst, XMMRegister src);
1316
1317 #ifndef _LP64 // no 32bit push/pop on amd64
1318 void pushl(Address src);
1319 #endif
1320
1321 void pushq(Address src);
1322
1323 // Xor Packed Byte Integer Values
1324 void pxor(XMMRegister dst, Address src);
1325 void pxor(XMMRegister dst, XMMRegister src);
1326
1327 void rcll(Register dst, int imm8);
1328
1329 void rclq(Register dst, int imm8);
1330
1331 void ret(int imm16);
1332
1333 void sahf();
1334
1335 void sarl(Register dst, int imm8);
1336 void sarl(Register dst);
1412
1413
1414 // Unordered Compare Scalar Double-Precision Floating-Point Values and set EFLAGS
1415 void ucomisd(XMMRegister dst, Address src);
1416 void ucomisd(XMMRegister dst, XMMRegister src);
1417
1418 // Unordered Compare Scalar Single-Precision Floating-Point Values and set EFLAGS
1419 void ucomiss(XMMRegister dst, Address src);
1420 void ucomiss(XMMRegister dst, XMMRegister src);
1421
1422 void xaddl(Address dst, Register src);
1423
1424 void xaddq(Address dst, Register src);
1425
1426 void xchgl(Register reg, Address adr);
1427 void xchgl(Register dst, Register src);
1428
1429 void xchgq(Register reg, Address adr);
1430 void xchgq(Register dst, Register src);
1431
1432 void xorl(Register dst, int32_t imm32);
1433 void xorl(Register dst, Address src);
1434 void xorl(Register dst, Register src);
1435
1436 void xorq(Register dst, Address src);
1437 void xorq(Register dst, Register src);
1438
1439 // Bitwise Logical XOR of Packed Double-Precision Floating-Point Values
1440 void xorpd(XMMRegister dst, Address src);
1441 void xorpd(XMMRegister dst, XMMRegister src);
1442
1443 // Bitwise Logical XOR of Packed Single-Precision Floating-Point Values
1444 void xorps(XMMRegister dst, Address src);
1445 void xorps(XMMRegister dst, XMMRegister src);
1446
1447 void set_byte_if_not_zero(Register dst); // sets reg to 1 if not zero, otherwise 0
1448 };
1449
1450
1451 // MacroAssembler extends Assembler by frequently used macros.
1452 //
1453 // Instructions for which a 'better' code sequence exists depending
1454 // on arguments should also go in here.
1455
1456 class MacroAssembler: public Assembler {
1457 friend class LIR_Assembler;
1458 friend class Runtime1; // as_Address()
1459
1460 protected:
1461
1462 Address as_Address(AddressLiteral adr);
1463 Address as_Address(ArrayAddress adr);
1464
1465 // Support for VM calls
1466 //
1467 // This is the base routine called by the different versions of call_VM_leaf. The interpreter
2158 void call(AddressLiteral entry);
2159
2160 // Jumps
2161
2162 // NOTE: these jumps transfer to the effective address of dst NOT
2163 // the address contained by dst. This is because this is more natural
2164 // for jumps/calls.
2165 void jump(AddressLiteral dst);
2166 void jump_cc(Condition cc, AddressLiteral dst);
2167
2168 // 32bit can do a case table jump in one instruction but we no longer allow the base
2169 // to be installed in the Address class. This jump transfers to the address
2170 // contained in the location described by entry (not the address of entry)
2171 void jump(ArrayAddress entry);
2172
2173 // Floating
2174
// The Address overloads below forward directly to the base Assembler.
// The AddressLiteral overloads either convert inline via as_Address() or
// are declared here and defined out of line.
2175 void andpd(XMMRegister dst, Address src) { Assembler::andpd(dst, src); }
2176 void andpd(XMMRegister dst, AddressLiteral src);
2177
2178 void comiss(XMMRegister dst, Address src) { Assembler::comiss(dst, src); }
2179 void comiss(XMMRegister dst, AddressLiteral src);
2180
2181 void comisd(XMMRegister dst, Address src) { Assembler::comisd(dst, src); }
2182 void comisd(XMMRegister dst, AddressLiteral src);
2183
2184 void fadd_s(Address src) { Assembler::fadd_s(src); }
2185 void fadd_s(AddressLiteral src) { Assembler::fadd_s(as_Address(src)); }
2186
2187 void fldcw(Address src) { Assembler::fldcw(src); }
2188 void fldcw(AddressLiteral src);
2189
2190 void fld_s(int index) { Assembler::fld_s(index); }
2191 void fld_s(Address src) { Assembler::fld_s(src); }
2192 void fld_s(AddressLiteral src);
2193
2194 void fld_d(Address src) { Assembler::fld_d(src); }
2195 void fld_d(AddressLiteral src);
2196
2197 void fld_x(Address src) { Assembler::fld_x(src); }
2198 void fld_x(AddressLiteral src);
2199
2200 void fmul_s(Address src) { Assembler::fmul_s(src); }
2201 void fmul_s(AddressLiteral src) { Assembler::fmul_s(as_Address(src)); }
2202
2203 void ldmxcsr(Address src) { Assembler::ldmxcsr(src); }
2204 void ldmxcsr(AddressLiteral src);
2205
2206 private:
2207 // these are private because users should be doing movflt/movdbl
2208
// NOTE(review): movflt/movdbl are presumably defined elsewhere in this class
// and select the preferred move encoding per platform — confirm against the
// rest of the file.
2209 void movss(Address dst, XMMRegister src) { Assembler::movss(dst, src); }
2210 void movss(XMMRegister dst, XMMRegister src) { Assembler::movss(dst, src); }
2211 void movss(XMMRegister dst, Address src) { Assembler::movss(dst, src); }
2212 void movss(XMMRegister dst, AddressLiteral src);
2213
2214 void movlpd(XMMRegister dst, Address src) {Assembler::movlpd(dst, src); }
2215 void movlpd(XMMRegister dst, AddressLiteral src);
2216
2217 public:
2218
// Scalar-SSE arithmetic/compare/xor wrappers: the XMMRegister and Address
// forms forward to the base Assembler; the AddressLiteral forms go through
// as_Address() inline or are defined out of line (declaration only here).
2219 void addsd(XMMRegister dst, XMMRegister src) { Assembler::addsd(dst, src); }
2220 void addsd(XMMRegister dst, Address src) { Assembler::addsd(dst, src); }
2221 void addsd(XMMRegister dst, AddressLiteral src) { Assembler::addsd(dst, as_Address(src)); }
2222
2223 void addss(XMMRegister dst, XMMRegister src) { Assembler::addss(dst, src); }
2224 void addss(XMMRegister dst, Address src) { Assembler::addss(dst, src); }
2225 void addss(XMMRegister dst, AddressLiteral src) { Assembler::addss(dst, as_Address(src)); }
2226
2227 void divsd(XMMRegister dst, XMMRegister src) { Assembler::divsd(dst, src); }
2228 void divsd(XMMRegister dst, Address src) { Assembler::divsd(dst, src); }
2229 void divsd(XMMRegister dst, AddressLiteral src) { Assembler::divsd(dst, as_Address(src)); }
2230
2231 void divss(XMMRegister dst, XMMRegister src) { Assembler::divss(dst, src); }
2232 void divss(XMMRegister dst, Address src) { Assembler::divss(dst, src); }
2233 void divss(XMMRegister dst, AddressLiteral src) { Assembler::divss(dst, as_Address(src)); }
2234
2235 void movsd(XMMRegister dst, XMMRegister src) { Assembler::movsd(dst, src); }
2236 void movsd(Address dst, XMMRegister src) { Assembler::movsd(dst, src); }
2237 void movsd(XMMRegister dst, Address src) { Assembler::movsd(dst, src); }
2238 void movsd(XMMRegister dst, AddressLiteral src) { Assembler::movsd(dst, as_Address(src)); }
2239
2240 void mulsd(XMMRegister dst, XMMRegister src) { Assembler::mulsd(dst, src); }
2241 void mulsd(XMMRegister dst, Address src) { Assembler::mulsd(dst, src); }
2242 void mulsd(XMMRegister dst, AddressLiteral src) { Assembler::mulsd(dst, as_Address(src)); }
2243
2244 void mulss(XMMRegister dst, XMMRegister src) { Assembler::mulss(dst, src); }
2245 void mulss(XMMRegister dst, Address src) { Assembler::mulss(dst, src); }
2246 void mulss(XMMRegister dst, AddressLiteral src) { Assembler::mulss(dst, as_Address(src)); }
2247
2248 void sqrtsd(XMMRegister dst, XMMRegister src) { Assembler::sqrtsd(dst, src); }
2249 void sqrtsd(XMMRegister dst, Address src) { Assembler::sqrtsd(dst, src); }
2250 void sqrtsd(XMMRegister dst, AddressLiteral src) { Assembler::sqrtsd(dst, as_Address(src)); }
2251
2252 void sqrtss(XMMRegister dst, XMMRegister src) { Assembler::sqrtss(dst, src); }
2253 void sqrtss(XMMRegister dst, Address src) { Assembler::sqrtss(dst, src); }
2254 void sqrtss(XMMRegister dst, AddressLiteral src) { Assembler::sqrtss(dst, as_Address(src)); }
2255
2256 void subsd(XMMRegister dst, XMMRegister src) { Assembler::subsd(dst, src); }
2257 void subsd(XMMRegister dst, Address src) { Assembler::subsd(dst, src); }
2258 void subsd(XMMRegister dst, AddressLiteral src) { Assembler::subsd(dst, as_Address(src)); }
2259
2260 void subss(XMMRegister dst, XMMRegister src) { Assembler::subss(dst, src); }
2261 void subss(XMMRegister dst, Address src) { Assembler::subss(dst, src); }
2262 void subss(XMMRegister dst, AddressLiteral src) { Assembler::subss(dst, as_Address(src)); }
2263
2264 void ucomiss(XMMRegister dst, XMMRegister src) { Assembler::ucomiss(dst, src); }
2265 void ucomiss(XMMRegister dst, Address src) { Assembler::ucomiss(dst, src); }
2266 void ucomiss(XMMRegister dst, AddressLiteral src);
2267
2268 void ucomisd(XMMRegister dst, XMMRegister src) { Assembler::ucomisd(dst, src); }
2269 void ucomisd(XMMRegister dst, Address src) { Assembler::ucomisd(dst, src); }
2270 void ucomisd(XMMRegister dst, AddressLiteral src);
2271
2272 // Bitwise Logical XOR of Packed Double-Precision Floating-Point Values
2273 void xorpd(XMMRegister dst, XMMRegister src) { Assembler::xorpd(dst, src); }
2274 void xorpd(XMMRegister dst, Address src) { Assembler::xorpd(dst, src); }
2275 void xorpd(XMMRegister dst, AddressLiteral src);
2276
2277 // Bitwise Logical XOR of Packed Single-Precision Floating-Point Values
2278 void xorps(XMMRegister dst, XMMRegister src) { Assembler::xorps(dst, src); }
2279 void xorps(XMMRegister dst, Address src) { Assembler::xorps(dst, src); }
2280 void xorps(XMMRegister dst, AddressLiteral src);
2281
2282 // Data
|
486 GS_segment = 0x65,
487
488 REX = 0x40,
489
490 REX_B = 0x41,
491 REX_X = 0x42,
492 REX_XB = 0x43,
493 REX_R = 0x44,
494 REX_RB = 0x45,
495 REX_RX = 0x46,
496 REX_RXB = 0x47,
497
498 REX_W = 0x48,
499
500 REX_WB = 0x49,
501 REX_WX = 0x4A,
502 REX_WXB = 0x4B,
503 REX_WR = 0x4C,
504 REX_WRB = 0x4D,
505 REX_WRX = 0x4E,
506 REX_WRXB = 0x4F,
507
508 VEX_3bytes = 0xC4,
509 VEX_2bytes = 0xC5
510 };
511
512 enum VexPrefix {
// Bit masks used when assembling the VEX prefix payload.  VEX_R and VEX_W
// may share the value 0x80 because they are encoded in different bytes of
// the 3-byte VEX prefix (see Intel SDM, VEX prefix encoding).
513 VEX_B = 0x20,
514 VEX_X = 0x40,
515 VEX_R = 0x80,
516 VEX_W = 0x80
517 };
518
519 enum VexSimdPrefix {
// Encoding of the implied legacy SIMD prefix (the VEX.pp field).
520 VEX_SIMD_NONE = 0x0, // no implied prefix
521 VEX_SIMD_66 = 0x1, // implied 0x66 prefix
522 VEX_SIMD_F3 = 0x2, // implied 0xF3 prefix
523 VEX_SIMD_F2 = 0x3 // implied 0xF2 prefix
524 };
525
526 enum VexOpcode {
// Encoding of the implied leading opcode escape bytes (the VEX.mmmmm field).
527 VEX_OPCODE_NONE = 0x0,
528 VEX_OPCODE_0F = 0x1, // implied 0x0F escape
529 VEX_OPCODE_0F_38 = 0x2, // implied 0x0F 0x38 escape
530 VEX_OPCODE_0F_3A = 0x3 // implied 0x0F 0x3A escape
531 };
532
533 enum WhichOperand {
534 // input to locate_operand, and format code for relocations
535 imm_operand = 0, // embedded 32-bit|64-bit immediate operand
536 disp32_operand = 1, // embedded 32-bit displacement or address
537 call32_operand = 2, // embedded 32-bit self-relative displacement
// Narrow-oop operands exist only on 64-bit builds, so the limit differs per build.
538 #ifndef _LP64
539 _WhichOperand_limit = 3
540 #else
541 narrow_oop_operand = 3, // embedded 32-bit immediate narrow oop
542 _WhichOperand_limit = 4
543 #endif
544 };
545
546
547
548 // NOTE: The general philosophy of the declarations here is that 64bit versions
549 // of instructions are freely declared without the need for wrapping them in an ifdef.
550 // (Some dangerous instructions are ifdef'd out of inappropriate jvm's.)
551 // In the .cpp file the implementations are wrapped so that they are dropped out
552 // of the resulting jvm. This is done mostly to keep the footprint of KERNEL
553 // to the size it was prior to merging up the 32bit and 64bit assemblers.
554 //
555 // This does mean you'll get a linker/runtime error if you use a 64bit only instruction
556 // in a 32bit vm. This is somewhat unfortunate but keeps the ifdef noise down.
557
558 private:
559
560
561 // 64bit prefixes
562 int prefix_and_encode(int reg_enc, bool byteinst = false);
563 int prefixq_and_encode(int reg_enc);
564
565 int prefix_and_encode(int dst_enc, int src_enc, bool byteinst = false);
566 int prefixq_and_encode(int dst_enc, int src_enc);
567
568 void prefix(Register reg);
569 void prefix(Address adr);
570 void prefixq(Address adr);
571
572 void prefix(Address adr, Register reg, bool byteinst = false);
573 void prefix(Address adr, XMMRegister reg);
574 void prefixq(Address adr, Register reg);
575 void prefixq(Address adr, XMMRegister reg);
576
577 void prefetch_prefix(Address src);
578
579 void rex_prefix(Address adr, XMMRegister xreg,
580 VexSimdPrefix pre, VexOpcode opc, bool rex_w);
581 int rex_prefix_and_encode(int dst_enc, int src_enc,
582 VexSimdPrefix pre, VexOpcode opc, bool rex_w);
583
584 void vex_prefix(bool vex_r, bool vex_b, bool vex_x, bool vex_w,
585 int nds_enc, VexSimdPrefix pre, VexOpcode opc,
586 bool vector256);
587
588 void vex_prefix(Address adr, int nds_enc, int xreg_enc,
589 VexSimdPrefix pre, VexOpcode opc,
590 bool vex_w, bool vector256);
591
592 int vex_prefix_and_encode(int dst_enc, int nds_enc, int src_enc,
593 VexSimdPrefix pre, VexOpcode opc,
594 bool vex_w, bool vector256);
595
596
597 void simd_prefix(XMMRegister xreg, XMMRegister nds, Address adr,
598 VexSimdPrefix pre, VexOpcode opc = VEX_OPCODE_0F,
599 bool rex_w = false, bool vector256 = false);
600
601 void simd_prefix(XMMRegister dst, Address src,
602 VexSimdPrefix pre, VexOpcode opc = VEX_OPCODE_0F) {
// Convenience overload: no NDS (non-destructive source) register, pass xnoreg.
603 simd_prefix(dst, xnoreg, src, pre, opc);
604 }
605 void simd_prefix(Address dst, XMMRegister src, VexSimdPrefix pre) {
// Store form: prefix encoding matches the load form with the register and
// memory operands swapped.
606 simd_prefix(src, dst, pre);
607 }
608 void simd_prefix_q(XMMRegister dst, XMMRegister nds, Address src,
609 VexSimdPrefix pre) {
610 bool rex_w = true; // force 64-bit operand size (REX.W / VEX.W)
611 simd_prefix(dst, nds, src, pre, VEX_OPCODE_0F, rex_w);
612 }
613
614
615 int simd_prefix_and_encode(XMMRegister dst, XMMRegister nds, XMMRegister src,
616 VexSimdPrefix pre, VexOpcode opc = VEX_OPCODE_0F,
617 bool rex_w = false, bool vector256 = false);
618
619 int simd_prefix_and_encode(XMMRegister dst, XMMRegister src,
620 VexSimdPrefix pre, VexOpcode opc = VEX_OPCODE_0F) {
// Convenience overload: no NDS register, pass xnoreg.
621 return simd_prefix_and_encode(dst, xnoreg, src, pre, opc);
622 }
623
624 // Move/convert 32-bit integer value.
625 int simd_prefix_and_encode(XMMRegister dst, XMMRegister nds, Register src,
626 VexSimdPrefix pre) {
627 // It is OK to cast from Register to XMMRegister to pass the argument here
628 // since only the encoding is used in simd_prefix_and_encode() and the
629 // number of general-purpose and XMM registers is the same.
630 return simd_prefix_and_encode(dst, nds, as_XMMRegister(src->encoding()), pre);
631 }
632 int simd_prefix_and_encode(XMMRegister dst, Register src, VexSimdPrefix pre) {
// No NDS register: pass xnoreg.
633 return simd_prefix_and_encode(dst, xnoreg, src, pre);
634 }
635 int simd_prefix_and_encode(Register dst, XMMRegister src, VexSimdPrefix pre) {
// Same Register->XMMRegister encoding cast as above, applied to the destination.
636 return simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, pre);
637 }
638
639 // Move/convert 64-bit integer value.
640 int simd_prefix_and_encode_q(XMMRegister dst, XMMRegister nds, Register src,
641 VexSimdPrefix pre) {
642 bool rex_w = true; // force 64-bit operand size (REX.W / VEX.W)
643 return simd_prefix_and_encode(dst, nds, as_XMMRegister(src->encoding()), pre, VEX_OPCODE_0F, rex_w);
644 }
645 int simd_prefix_and_encode_q(XMMRegister dst, Register src, VexSimdPrefix pre) {
// No NDS register: pass xnoreg.
646 return simd_prefix_and_encode_q(dst, xnoreg, src, pre);
647 }
648 int simd_prefix_and_encode_q(Register dst, XMMRegister src, VexSimdPrefix pre) {
649 bool rex_w = true; // force 64-bit operand size (REX.W / VEX.W)
650 return simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, pre, VEX_OPCODE_0F, rex_w);
651 }
652
653 // Helper functions for groups of instructions
654 void emit_arith_b(int op1, int op2, Register dst, int imm8);
655
656 void emit_arith(int op1, int op2, Register dst, int32_t imm32);
657 // only 32bit??
658 void emit_arith(int op1, int op2, Register dst, jobject obj);
659 void emit_arith(int op1, int op2, Register dst, Register src);
660
661 void emit_operand(Register reg,
662 Register base, Register index, Address::ScaleFactor scale,
663 int disp,
664 RelocationHolder const& rspec,
665 int rip_relative_correction = 0);
666
667 void emit_operand(Register reg, Address adr, int rip_relative_correction = 0);
668
669 // operands that only take the original 32bit registers
670 void emit_operand32(Register reg, Address adr);
671
672 void emit_operand(XMMRegister reg,
845
846 void addq(Address dst, int32_t imm32);
847 void addq(Address dst, Register src);
848 void addq(Register dst, int32_t imm32);
849 void addq(Register dst, Address src);
850 void addq(Register dst, Register src);
851
852 void addr_nop_4();
853 void addr_nop_5();
854 void addr_nop_7();
855 void addr_nop_8();
856
857 // Add Scalar Double-Precision Floating-Point Values
858 void addsd(XMMRegister dst, Address src);
859 void addsd(XMMRegister dst, XMMRegister src);
860
861 // Add Scalar Single-Precision Floating-Point Values
862 void addss(XMMRegister dst, Address src);
863 void addss(XMMRegister dst, XMMRegister src);
864
865 void andl(Address dst, int32_t imm32);
866 void andl(Register dst, int32_t imm32);
867 void andl(Register dst, Address src);
868 void andl(Register dst, Register src);
869
870 void andq(Address dst, int32_t imm32);
871 void andq(Register dst, int32_t imm32);
872 void andq(Register dst, Address src);
873 void andq(Register dst, Register src);
874
875 // Bitwise Logical AND of Packed Double-Precision Floating-Point Values
876 void andpd(XMMRegister dst, XMMRegister src);
877
878 // Bitwise Logical AND of Packed Single-Precision Floating-Point Values
879 void andps(XMMRegister dst, XMMRegister src);
880
881 void bsfl(Register dst, Register src);
882 void bsrl(Register dst, Register src);
883
884 #ifdef _LP64
885 void bsfq(Register dst, Register src);
886 void bsrq(Register dst, Register src);
887 #endif
888
889 void bswapl(Register reg);
890
891 void bswapq(Register reg);
892
893 void call(Label& L, relocInfo::relocType rtype);
894 void call(Register reg); // push pc; pc <- reg
895 void call(Address adr); // push pc; pc <- adr
896
897 void cdql();
898
899 void cdqq();
900
921 void cmpq(Address dst, Register src);
922
923 void cmpq(Register dst, int32_t imm32);
924 void cmpq(Register dst, Register src);
925 void cmpq(Register dst, Address src);
926
927 // these are dummies used to catch attempting to convert NULL to Register
928 void cmpl(Register dst, void* junk); // dummy
929 void cmpq(Register dst, void* junk); // dummy
930
931 void cmpw(Address dst, int imm16);
932
933 void cmpxchg8 (Address adr);
934
935 void cmpxchgl(Register reg, Address adr);
936
937 void cmpxchgq(Register reg, Address adr);
938
939 // Ordered Compare Scalar Double-Precision Floating-Point Values and set EFLAGS
940 void comisd(XMMRegister dst, Address src);
941 void comisd(XMMRegister dst, XMMRegister src);
942
943 // Ordered Compare Scalar Single-Precision Floating-Point Values and set EFLAGS
944 void comiss(XMMRegister dst, Address src);
945 void comiss(XMMRegister dst, XMMRegister src);
946
947 // Identify processor type and features
// Emits the raw two-byte CPUID opcode (0F A2) directly into the code buffer.
948 void cpuid() {
949 emit_byte(0x0F);
950 emit_byte(0xA2);
951 }
952
953 // Convert Scalar Double-Precision Floating-Point Value to Scalar Single-Precision Floating-Point Value
954 void cvtsd2ss(XMMRegister dst, XMMRegister src);
955 void cvtsd2ss(XMMRegister dst, Address src);
956
957 // Convert Doubleword Integer to Scalar Double-Precision Floating-Point Value
958 void cvtsi2sdl(XMMRegister dst, Register src);
959 void cvtsi2sdl(XMMRegister dst, Address src);
960 void cvtsi2sdq(XMMRegister dst, Register src);
961 void cvtsi2sdq(XMMRegister dst, Address src);
962
963 // Convert Doubleword Integer to Scalar Single-Precision Floating-Point Value
964 void cvtsi2ssl(XMMRegister dst, Register src);
965 void cvtsi2ssl(XMMRegister dst, Address src);
966 void cvtsi2ssq(XMMRegister dst, Register src);
967 void cvtsi2ssq(XMMRegister dst, Address src);
968
969 // Convert Packed Signed Doubleword Integers to Packed Double-Precision Floating-Point Value
970 void cvtdq2pd(XMMRegister dst, XMMRegister src);
971
972 // Convert Packed Signed Doubleword Integers to Packed Single-Precision Floating-Point Value
973 void cvtdq2ps(XMMRegister dst, XMMRegister src);
974
975 // Convert Scalar Single-Precision Floating-Point Value to Scalar Double-Precision Floating-Point Value
976 void cvtss2sd(XMMRegister dst, XMMRegister src);
977 void cvtss2sd(XMMRegister dst, Address src);
978
979 // Convert with Truncation Scalar Double-Precision Floating-Point Value to Doubleword Integer
980 void cvttsd2sil(Register dst, Address src);
981 void cvttsd2sil(Register dst, XMMRegister src);
982 void cvttsd2siq(Register dst, XMMRegister src);
983
984 // Convert with Truncation Scalar Single-Precision Floating-Point Value to Doubleword Integer
985 void cvttss2sil(Register dst, XMMRegister src);
986 void cvttss2siq(Register dst, XMMRegister src);
987
988 // Divide Scalar Double-Precision Floating-Point Values
989 void divsd(XMMRegister dst, Address src);
990 void divsd(XMMRegister dst, XMMRegister src);
991
992 // Divide Scalar Single-Precision Floating-Point Values
993 void divss(XMMRegister dst, Address src);
994 void divss(XMMRegister dst, XMMRegister src);
995
996 void emms();
997
1232
1233 void mfence();  // MFENCE: full memory fence -- orders all prior loads and stores
1234
1235 // Moves
1236
1237 void mov64(Register dst, int64_t imm64);  // load a full 64-bit immediate
1238
1239 void movb(Address dst, Register src);  // byte moves
1240 void movb(Address dst, int imm8);
1241 void movb(Register dst, Address src);
1242
1243 void movdl(XMMRegister dst, Register src);  // MOVD: 32-bit move between GP reg/memory and XMM
1244 void movdl(Register dst, XMMRegister src);
1245 void movdl(XMMRegister dst, Address src);
1246
1247 // Move Double Quadword
1248 void movdq(XMMRegister dst, Register src);
1249 void movdq(Register dst, XMMRegister src);
1250
1251 // Move Aligned Double Quadword
1252 void movdqa(XMMRegister dst, XMMRegister src);
1253
1254 // Move Unaligned Double Quadword
1255 void movdqu(Address dst, XMMRegister src);
1256 void movdqu(XMMRegister dst, Address src);
1257 void movdqu(XMMRegister dst, XMMRegister src);
1258
1259 void movl(Register dst, int32_t imm32);  // 32-bit moves
1260 void movl(Address dst, int32_t imm32);
1261 void movl(Register dst, Register src);
1262 void movl(Register dst, Address src);
1263 void movl(Address dst, Register src);
1264
1265 // These dummies prevent using movl from converting a zero (like NULL) into Register
1266 // by giving the compiler two choices it can't resolve
1267
1268 void movl(Address dst, void* junk);  // dummy overloads -- see note above; calls are deliberately ambiguous
1269 void movl(Register dst, void* junk);
1271 #ifdef _LP64
1351 #endif
1352
1353 void nop(int i = 1);  // emit nop padding; i = nop count -- confirm exact semantics in the .cpp
1354
1355 void notl(Register dst);  // one's-complement negation
1356
1357 #ifdef _LP64
1358 void notq(Register dst);
1359 #endif
1360
1361 void orl(Address dst, int32_t imm32);
1362 void orl(Register dst, int32_t imm32);
1363 void orl(Register dst, Address src);
1364 void orl(Register dst, Register src);
1365
1366 void orq(Address dst, int32_t imm32);
1367 void orq(Register dst, int32_t imm32);
1368 void orq(Register dst, Address src);
1369 void orq(Register dst, Register src);
1370
1371 // Pack with unsigned saturation
1372 void packuswb(XMMRegister dst, XMMRegister src);
1373 void packuswb(XMMRegister dst, Address src);
1374
1375 // SSE4.2 string instructions
1376 void pcmpestri(XMMRegister xmm1, XMMRegister xmm2, int imm8);  // imm8 selects the comparison mode
1377 void pcmpestri(XMMRegister xmm1, Address src, int imm8);
1378
1379 // SSE4.1 packed move
1380 void pmovzxbw(XMMRegister dst, XMMRegister src);  // zero-extend packed bytes to words
1381 void pmovzxbw(XMMRegister dst, Address src);
1382
1383 #ifndef _LP64 // no 32bit push/pop on amd64
1384 void popl(Address dst);
1385 #endif
1386
1387 #ifdef _LP64
1388 void popq(Address dst);
1389 #endif
1390
1391 void popcntl(Register dst, Address src);  // POPCNT: count of set bits
1392 void popcntl(Register dst, Register src);
1393
1394 #ifdef _LP64
1395 void popcntq(Register dst, Address src);
1396 void popcntq(Register dst, Register src);
1397 #endif
1398
1399 // Prefetches (SSE, SSE2, 3DNOW only)
1400
1401 void prefetchnta(Address src);  // non-temporal hint -- minimize cache pollution
1402 void prefetchr(Address src);
1403 void prefetcht0(Address src);  // prefetch into all cache levels
1404 void prefetcht1(Address src);
1405 void prefetcht2(Address src);
1406 void prefetchw(Address src);   // prefetch with intent to write (PREFETCHW)
1408 // POR - Bitwise logical OR
1409 void por(XMMRegister dst, XMMRegister src);
1410 void por(XMMRegister dst, Address src);
1411
1412 // Shuffle Packed Doublewords
1413 void pshufd(XMMRegister dst, XMMRegister src, int mode);  // mode: imm8 of 2-bit source-lane selectors
1414 void pshufd(XMMRegister dst, Address src, int mode);
1415
1416 // Shuffle Packed Low Words
1417 void pshuflw(XMMRegister dst, XMMRegister src, int mode);
1418 void pshuflw(XMMRegister dst, Address src, int mode);
1419
1420 // Shift Right by bits Logical Quadword Immediate
1421 void psrlq(XMMRegister dst, int shift);
1422
1423 // Shift Right by bytes Logical DoubleQuadword Immediate
1424 void psrldq(XMMRegister dst, int shift);
1425
1426 // Logical Compare Double Quadword
1427 void ptest(XMMRegister dst, XMMRegister src);  // SSE4.1 PTEST: sets ZF/CF from AND / ANDN of the operands
1428 void ptest(XMMRegister dst, Address src);
1429
1430 // Interleave Low Bytes
1431 void punpcklbw(XMMRegister dst, XMMRegister src);
1432 void punpcklbw(XMMRegister dst, Address src);
1433
1434 // Interleave Low Doublewords
1435 void punpckldq(XMMRegister dst, XMMRegister src);
1436 void punpckldq(XMMRegister dst, Address src);
1437
1438 #ifndef _LP64 // no 32bit push/pop on amd64
1439 void pushl(Address src);
1440 #endif
1441
1442 void pushq(Address src);
1443
1444 // Xor Packed Byte Integer Values
1445 void pxor(XMMRegister dst, Address src);
1446 void pxor(XMMRegister dst, XMMRegister src);
1447
1448 void rcll(Register dst, int imm8);  // rotate left through carry
1449
1450 void rclq(Register dst, int imm8);
1451
1452 void ret(int imm16);  // RET imm16: return, then pop imm16 bytes of arguments
1453
1454 void sahf();  // store AH into the low byte of EFLAGS (SF, ZF, AF, PF, CF)
1455
1456 void sarl(Register dst, int imm8);  // arithmetic shift right by immediate
1457 void sarl(Register dst);  // presumably the shift-by-CL one-operand form -- confirm in the .cpp
1533
1534
1535 // Unordered Compare Scalar Double-Precision Floating-Point Values and set EFLAGS
1536 void ucomisd(XMMRegister dst, Address src);
1537 void ucomisd(XMMRegister dst, XMMRegister src);
1538
1539 // Unordered Compare Scalar Single-Precision Floating-Point Values and set EFLAGS
1540 void ucomiss(XMMRegister dst, Address src);
1541 void ucomiss(XMMRegister dst, XMMRegister src);
1542
1543 void xaddl(Address dst, Register src);  // XADD: exchange-and-add
1544
1545 void xaddq(Address dst, Register src);
1546
1547 void xchgl(Register reg, Address adr);  // XCHG; with a memory operand it is implicitly locked on x86
1548 void xchgl(Register dst, Register src);
1549
1550 void xchgq(Register reg, Address adr);
1551 void xchgq(Register dst, Register src);
1553 // Get Value of Extended Control Register
1554 void xgetbv() {  // XGETBV: reads the XCR selected by ECX into EDX:EAX
1555 emit_byte(0x0F);  // opcode bytes 0F 01 D0 = XGETBV
1556 emit_byte(0x01);
1557 emit_byte(0xD0);
1558 }
1559
1560 void xorl(Register dst, int32_t imm32);
1561 void xorl(Register dst, Address src);
1562 void xorl(Register dst, Register src);
1563
1564 void xorq(Register dst, Address src);  // 64-bit forms
1565 void xorq(Register dst, Register src);
1566
1567 // Bitwise Logical XOR of Packed Double-Precision Floating-Point Values
1568 void xorpd(XMMRegister dst, XMMRegister src);
1569
1570 // Bitwise Logical XOR of Packed Single-Precision Floating-Point Values
1571 void xorps(XMMRegister dst, XMMRegister src);
1572
1573 void set_byte_if_not_zero(Register dst); // sets reg to 1 if not zero, otherwise 0
1574
1575 protected:
1576 // Next instructions require address alignment 16 bytes SSE mode.
1577 // They should be called only from corresponding MacroAssembler instructions.
1578 void andpd(XMMRegister dst, Address src);
1579 void andps(XMMRegister dst, Address src);
1580 void xorpd(XMMRegister dst, Address src);
1581 void xorps(XMMRegister dst, Address src);
1582
1583 };  // class Assembler
1584
1585
1586 // MacroAssembler extends Assembler by frequently used macros.
1587 //
1588 // Instructions for which a 'better' code sequence exists depending
1589 // on arguments should also go in here.
1590
1591 class MacroAssembler: public Assembler {
1592 friend class LIR_Assembler;
1593 friend class Runtime1; // as_Address()
1594
1595 protected:
1596
1597 Address as_Address(AddressLiteral adr);  // resolve a literal to a concrete Address -- defined in the .cpp
1598 Address as_Address(ArrayAddress adr);
1599
1600 // Support for VM calls
1601 //
1602 // This is the base routine called by the different versions of call_VM_leaf. The interpreter
2293 void call(AddressLiteral entry);
2294
2295 // Jumps
2296
2297 // NOTE: these jumps transfer to the effective address of dst NOT
2298 // the address contained by dst. This is because this is more natural
2299 // for jumps/calls.
2300 void jump(AddressLiteral dst);
2301 void jump_cc(Condition cc, AddressLiteral dst);
2302
2303 // 32bit can do a case table jump in one instruction but we no longer allow the base
2304 // to be installed in the Address class. This jump transfers to the address
2305 // contained in the location described by entry (not the address of entry)
2306 void jump(ArrayAddress entry);
2307
2308 // Floating
2309
// Floating-point wrappers: the XMMRegister/Address forms forward straight to the
// Assembler encodings; the AddressLiteral overloads are only declared here and are
// defined out-of-line (they may need to materialize the address -- see the .cpp).
2310 void andpd(XMMRegister dst, Address src) { Assembler::andpd(dst, src); }
2311 void andpd(XMMRegister dst, AddressLiteral src);
2312
2313 void andps(XMMRegister dst, XMMRegister src) { Assembler::andps(dst, src); }
2314 void andps(XMMRegister dst, Address src) { Assembler::andps(dst, src); }
2315 void andps(XMMRegister dst, AddressLiteral src);
2316
2317 void comiss(XMMRegister dst, XMMRegister src) { Assembler::comiss(dst, src); }
2318 void comiss(XMMRegister dst, Address src) { Assembler::comiss(dst, src); }
2319 void comiss(XMMRegister dst, AddressLiteral src);
2320
2321 void comisd(XMMRegister dst, XMMRegister src) { Assembler::comisd(dst, src); }
2322 void comisd(XMMRegister dst, Address src) { Assembler::comisd(dst, src); }
2323 void comisd(XMMRegister dst, AddressLiteral src);
2324
2325 void fadd_s(Address src) { Assembler::fadd_s(src); }
2326 void fadd_s(AddressLiteral src) { Assembler::fadd_s(as_Address(src)); }
2327
2328 void fldcw(Address src) { Assembler::fldcw(src); }
2329 void fldcw(AddressLiteral src);
2330
2331 void fld_s(int index) { Assembler::fld_s(index); }
2332 void fld_s(Address src) { Assembler::fld_s(src); }
2333 void fld_s(AddressLiteral src);
2334
2335 void fld_d(Address src) { Assembler::fld_d(src); }
2336 void fld_d(AddressLiteral src);
2337
2338 void fld_x(Address src) { Assembler::fld_x(src); }
2339 void fld_x(AddressLiteral src);
2340
2341 void fmul_s(Address src) { Assembler::fmul_s(src); }
2342 void fmul_s(AddressLiteral src) { Assembler::fmul_s(as_Address(src)); }
2343
2344 void ldmxcsr(Address src) { Assembler::ldmxcsr(src); }
2345 void ldmxcsr(AddressLiteral src);
2347 private:
2348 // these are private because users should be doing movflt/movdbl
2349
2350 void movss(Address dst, XMMRegister src) { Assembler::movss(dst, src); }
2351 void movss(XMMRegister dst, XMMRegister src) { Assembler::movss(dst, src); }
2352 void movss(XMMRegister dst, Address src) { Assembler::movss(dst, src); }
2353 void movss(XMMRegister dst, AddressLiteral src);
2354
2355 void movlpd(XMMRegister dst, Address src) {Assembler::movlpd(dst, src); }  // MOVLPD: load into low qword of xmm
2356 void movlpd(XMMRegister dst, AddressLiteral src);
2358 public:
// Scalar SSE arithmetic/compare wrappers. Register/Address forms forward directly
// to the Assembler encodings; AddressLiteral overloads are implemented in the .cpp.
2359
2360 void addsd(XMMRegister dst, XMMRegister src) { Assembler::addsd(dst, src); }
2361 void addsd(XMMRegister dst, Address src) { Assembler::addsd(dst, src); }
2362 void addsd(XMMRegister dst, AddressLiteral src);
2363
2364 void addss(XMMRegister dst, XMMRegister src) { Assembler::addss(dst, src); }
2365 void addss(XMMRegister dst, Address src) { Assembler::addss(dst, src); }
2366 void addss(XMMRegister dst, AddressLiteral src);
2367
2368 void divsd(XMMRegister dst, XMMRegister src) { Assembler::divsd(dst, src); }
2369 void divsd(XMMRegister dst, Address src) { Assembler::divsd(dst, src); }
2370 void divsd(XMMRegister dst, AddressLiteral src);
2371
2372 void divss(XMMRegister dst, XMMRegister src) { Assembler::divss(dst, src); }
2373 void divss(XMMRegister dst, Address src) { Assembler::divss(dst, src); }
2374 void divss(XMMRegister dst, AddressLiteral src);
2375
2376 void movsd(XMMRegister dst, XMMRegister src) { Assembler::movsd(dst, src); }
2377 void movsd(Address dst, XMMRegister src) { Assembler::movsd(dst, src); }
2378 void movsd(XMMRegister dst, Address src) { Assembler::movsd(dst, src); }
2379 void movsd(XMMRegister dst, AddressLiteral src);
2380
2381 void mulsd(XMMRegister dst, XMMRegister src) { Assembler::mulsd(dst, src); }
2382 void mulsd(XMMRegister dst, Address src) { Assembler::mulsd(dst, src); }
2383 void mulsd(XMMRegister dst, AddressLiteral src);
2384
2385 void mulss(XMMRegister dst, XMMRegister src) { Assembler::mulss(dst, src); }
2386 void mulss(XMMRegister dst, Address src) { Assembler::mulss(dst, src); }
2387 void mulss(XMMRegister dst, AddressLiteral src);
2388
2389 void sqrtsd(XMMRegister dst, XMMRegister src) { Assembler::sqrtsd(dst, src); }
2390 void sqrtsd(XMMRegister dst, Address src) { Assembler::sqrtsd(dst, src); }
2391 void sqrtsd(XMMRegister dst, AddressLiteral src);
2392
2393 void sqrtss(XMMRegister dst, XMMRegister src) { Assembler::sqrtss(dst, src); }
2394 void sqrtss(XMMRegister dst, Address src) { Assembler::sqrtss(dst, src); }
2395 void sqrtss(XMMRegister dst, AddressLiteral src);
2396
2397 void subsd(XMMRegister dst, XMMRegister src) { Assembler::subsd(dst, src); }
2398 void subsd(XMMRegister dst, Address src) { Assembler::subsd(dst, src); }
2399 void subsd(XMMRegister dst, AddressLiteral src);
2400
2401 void subss(XMMRegister dst, XMMRegister src) { Assembler::subss(dst, src); }
2402 void subss(XMMRegister dst, Address src) { Assembler::subss(dst, src); }
2403 void subss(XMMRegister dst, AddressLiteral src);
2404
2405 void ucomiss(XMMRegister dst, XMMRegister src) { Assembler::ucomiss(dst, src); }
2406 void ucomiss(XMMRegister dst, Address src) { Assembler::ucomiss(dst, src); }
2407 void ucomiss(XMMRegister dst, AddressLiteral src);
2408
2409 void ucomisd(XMMRegister dst, XMMRegister src) { Assembler::ucomisd(dst, src); }
2410 void ucomisd(XMMRegister dst, Address src) { Assembler::ucomisd(dst, src); }
2411 void ucomisd(XMMRegister dst, AddressLiteral src);
2412
2413 // Bitwise Logical XOR of Packed Double-Precision Floating-Point Values
2414 void xorpd(XMMRegister dst, XMMRegister src) { Assembler::xorpd(dst, src); }
2415 void xorpd(XMMRegister dst, Address src) { Assembler::xorpd(dst, src); }
2416 void xorpd(XMMRegister dst, AddressLiteral src);
2417
2418 // Bitwise Logical XOR of Packed Single-Precision Floating-Point Values
2419 void xorps(XMMRegister dst, XMMRegister src) { Assembler::xorps(dst, src); }
2420 void xorps(XMMRegister dst, Address src) { Assembler::xorps(dst, src); }
2421 void xorps(XMMRegister dst, AddressLiteral src);
2422
2423 // Data
|