src/cpu/x86/vm/assembler_x86.hpp
Webrev for 7119644

Old version:
   1 /*
   2  * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *


 574   void prefixq(Address adr, Register reg);
 575   void prefixq(Address adr, XMMRegister reg);
 576 
 577   void prefetch_prefix(Address src);
 578 
 579   void rex_prefix(Address adr, XMMRegister xreg,
 580                   VexSimdPrefix pre, VexOpcode opc, bool rex_w);
 581   int  rex_prefix_and_encode(int dst_enc, int src_enc,
 582                              VexSimdPrefix pre, VexOpcode opc, bool rex_w);
 583 
 584   void vex_prefix(bool vex_r, bool vex_b, bool vex_x, bool vex_w,
 585                   int nds_enc, VexSimdPrefix pre, VexOpcode opc,
 586                   bool vector256);
 587 
 588   void vex_prefix(Address adr, int nds_enc, int xreg_enc,
 589                   VexSimdPrefix pre, VexOpcode opc,
 590                   bool vex_w, bool vector256);
 591 
 592   void vex_prefix(XMMRegister dst, XMMRegister nds, Address src,
 593                   VexSimdPrefix pre, bool vector256 = false) {
 594      vex_prefix(src, nds->encoding(), dst->encoding(),
 595                 pre, VEX_OPCODE_0F, false, vector256);
 596   }
 597 
 598   int  vex_prefix_and_encode(int dst_enc, int nds_enc, int src_enc,
 599                              VexSimdPrefix pre, VexOpcode opc,
 600                              bool vex_w, bool vector256);
 601 
 602   int  vex_prefix_and_encode(XMMRegister dst, XMMRegister nds, XMMRegister src,
 603                              VexSimdPrefix pre, bool vector256 = false) {
 604      return vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(),
 605                                   pre, VEX_OPCODE_0F, false, vector256);
 606   }
 607 
 608   void simd_prefix(XMMRegister xreg, XMMRegister nds, Address adr,
 609                    VexSimdPrefix pre, VexOpcode opc = VEX_OPCODE_0F,
 610                    bool rex_w = false, bool vector256 = false);
 611 
 612   void simd_prefix(XMMRegister dst, Address src,
 613                    VexSimdPrefix pre, VexOpcode opc = VEX_OPCODE_0F) {
 614     simd_prefix(dst, xnoreg, src, pre, opc);
 615   }
 616   void simd_prefix(Address dst, XMMRegister src, VexSimdPrefix pre) {
 617     simd_prefix(src, dst, pre);
 618   }
 619   void simd_prefix_q(XMMRegister dst, XMMRegister nds, Address src,
 620                      VexSimdPrefix pre) {
 621     bool rex_w = true;
 622     simd_prefix(dst, nds, src, pre, VEX_OPCODE_0F, rex_w);
 623   }
 624 
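For orientation, the rex_prefix and prefixq helpers above emit a single REX byte ahead of the opcode. A minimal standalone sketch of its layout, illustrative only and not HotSpot code:

    #include <cstdint>

    // REX is the byte 0100WRXB: W selects 64-bit operand size (rex_w above),
    // while R, X and B extend the ModRM reg, SIB index and base/rm fields so
    // that r8-r15 and xmm8-xmm15 become addressable.
    static uint8_t rex_byte(bool rex_w, bool rex_r, bool rex_x, bool rex_b) {
      uint8_t b = 0x40;
      if (rex_w) b |= 0x08;
      if (rex_r) b |= 0x04;
      if (rex_x) b |= 0x02;
      if (rex_b) b |= 0x01;
      return b;
    }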
 625 


1244         // orderAccess code.
1245         lock();
1246         addl(Address(rsp, 0), 0);// Assert the lock# signal here
1247       }
1248     }
1249   }
1250 
1251   void mfence();
1252 
1253   // Moves
1254 
1255   void mov64(Register dst, int64_t imm64);
1256 
1257   void movb(Address dst, Register src);
1258   void movb(Address dst, int imm8);
1259   void movb(Register dst, Address src);
1260 
1261   void movdl(XMMRegister dst, Register src);
1262   void movdl(Register dst, XMMRegister src);
1263   void movdl(XMMRegister dst, Address src);
1264 
1265   // Move Double Quadword
1266   void movdq(XMMRegister dst, Register src);
1267   void movdq(Register dst, XMMRegister src);
1268 
1269   // Move Aligned Double Quadword
1270   void movdqa(XMMRegister dst, XMMRegister src);
1271 
1272   // Move Unaligned Double Quadword
1273   void movdqu(Address     dst, XMMRegister src);
1274   void movdqu(XMMRegister dst, Address src);
1275   void movdqu(XMMRegister dst, XMMRegister src);
1276 
1277   void movl(Register dst, int32_t imm32);
1278   void movl(Address dst, int32_t imm32);
1279   void movl(Register dst, Register src);
1280   void movl(Register dst, Address src);
1281   void movl(Address dst, Register src);
1282 
1283   // These dummy overloads prevent movl from converting a zero (like NULL) into a
1284   // Register by giving the compiler two choices it can't resolve.
1285 
1286   void movl(Address  dst, void* junk);
1287   void movl(Register dst, void* junk);
1288 
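The trick behind these dummies: if Register is a pointer typedef, as it is in HotSpot of this era, a literal 0 or NULL converts to it implicitly, so movl(dst, NULL) would silently emit a register-register move. Declaring a void* overload gives the compiler a second equally good candidate and turns the mistake into a compile error. A minimal mock-up, with hypothetical names:

    struct RegisterImpl;
    typedef RegisterImpl* Register;         // NULL converts to this implicitly

    void movl(Register dst, Register src);  // register-register move
    void movl(Register dst, void* junk);    // dummy: declared, never defined

    void example(Register dst) {
      // movl(dst, NULL);  // error: ambiguous between Register and void*
    }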
1289 #ifdef _LP64
1290   void movq(Register dst, Register src);
1291   void movq(Register dst, Address src);
1292   void movq(Address  dst, Register src);
1293 #endif
1294 
1295   void movq(Address     dst, MMXRegister src );
1296   void movq(MMXRegister dst, Address src );


1598   void vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src);
1599   void vaddss(XMMRegister dst, XMMRegister nds, Address src);
1600   void vaddss(XMMRegister dst, XMMRegister nds, XMMRegister src);
1601   void vandpd(XMMRegister dst, XMMRegister nds, Address src);
1602   void vandps(XMMRegister dst, XMMRegister nds, Address src);
1603   void vdivsd(XMMRegister dst, XMMRegister nds, Address src);
1604   void vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src);
1605   void vdivss(XMMRegister dst, XMMRegister nds, Address src);
1606   void vdivss(XMMRegister dst, XMMRegister nds, XMMRegister src);
1607   void vmulsd(XMMRegister dst, XMMRegister nds, Address src);
1608   void vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister src);
1609   void vmulss(XMMRegister dst, XMMRegister nds, Address src);
1610   void vmulss(XMMRegister dst, XMMRegister nds, XMMRegister src);
1611   void vsubsd(XMMRegister dst, XMMRegister nds, Address src);
1612   void vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src);
1613   void vsubss(XMMRegister dst, XMMRegister nds, Address src);
1614   void vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src);
1615   void vxorpd(XMMRegister dst, XMMRegister nds, Address src);
1616   void vxorps(XMMRegister dst, XMMRegister nds, Address src);
1617 
1618 
1619  protected:
1620   // The following instructions require 16-byte address alignment in SSE mode.
1621   // They should be called only from the corresponding MacroAssembler instructions.
1622   void andpd(XMMRegister dst, Address src);
1623   void andps(XMMRegister dst, Address src);
1624   void xorpd(XMMRegister dst, Address src);
1625   void xorps(XMMRegister dst, Address src);
1626 
1627 };
1628 
1629 
1630 // MacroAssembler extends Assembler by frequently used macros.
1631 //
1632 // Instructions for which a 'better' code sequence exists depending
1633 // on arguments should also go in here.
1634 
1635 class MacroAssembler: public Assembler {
1636   friend class LIR_Assembler;
1637   friend class Runtime1;      // as_Address()
1638 


2510   void vdivss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vdivss(dst, nds, src); }
2511   void vdivss(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vdivss(dst, nds, src); }
2512   void vdivss(XMMRegister dst, XMMRegister nds, AddressLiteral src);
2513 
2514   void vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vmulsd(dst, nds, src); }
2515   void vmulsd(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vmulsd(dst, nds, src); }
2516   void vmulsd(XMMRegister dst, XMMRegister nds, AddressLiteral src);
2517 
2518   void vmulss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vmulss(dst, nds, src); }
2519   void vmulss(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vmulss(dst, nds, src); }
2520   void vmulss(XMMRegister dst, XMMRegister nds, AddressLiteral src);
2521 
2522   void vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vsubsd(dst, nds, src); }
2523   void vsubsd(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vsubsd(dst, nds, src); }
2524   void vsubsd(XMMRegister dst, XMMRegister nds, AddressLiteral src);
2525 
2526   void vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vsubss(dst, nds, src); }
2527   void vsubss(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vsubss(dst, nds, src); }
2528   void vsubss(XMMRegister dst, XMMRegister nds, AddressLiteral src);
2529 
2530   void vxorpd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vxorpd(dst, nds, src); }
2531   void vxorpd(XMMRegister dst, XMMRegister nds, AddressLiteral src);
2532 
2533   void vxorps(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vxorps(dst, nds, src); }
2534   void vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src);
2535 
2536 
2537   // Data
2538 
2539   void cmov32( Condition cc, Register dst, Address  src);
2540   void cmov32( Condition cc, Register dst, Register src);
2541 
2542   void cmov(   Condition cc, Register dst, Register src) { cmovptr(cc, dst, src); }
2543 
2544   void cmovptr(Condition cc, Register dst, Address  src) { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmov32(cc, dst, src)); }
2545   void cmovptr(Condition cc, Register dst, Register src) { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmov32(cc, dst, src)); }
2546 
2547   void movoop(Register dst, jobject obj);
2548   void movoop(Address dst, jobject obj);
2549 
2550   void movptr(ArrayAddress dst, Register src);
2551   // can this do an lea?
2552   void movptr(Register dst, ArrayAddress src);


New version:

   1 /*
   2  * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *


 574   void prefixq(Address adr, Register reg);
 575   void prefixq(Address adr, XMMRegister reg);
 576 
 577   void prefetch_prefix(Address src);
 578 
 579   void rex_prefix(Address adr, XMMRegister xreg,
 580                   VexSimdPrefix pre, VexOpcode opc, bool rex_w);
 581   int  rex_prefix_and_encode(int dst_enc, int src_enc,
 582                              VexSimdPrefix pre, VexOpcode opc, bool rex_w);
 583 
 584   void vex_prefix(bool vex_r, bool vex_b, bool vex_x, bool vex_w,
 585                   int nds_enc, VexSimdPrefix pre, VexOpcode opc,
 586                   bool vector256);
 587 
 588   void vex_prefix(Address adr, int nds_enc, int xreg_enc,
 589                   VexSimdPrefix pre, VexOpcode opc,
 590                   bool vex_w, bool vector256);
 591 
 592   void vex_prefix(XMMRegister dst, XMMRegister nds, Address src,
 593                   VexSimdPrefix pre, bool vector256 = false) {
 594     int dst_enc = dst->encoding();
 595     int nds_enc = nds->is_valid() ? nds->encoding() : 0;
 596     vex_prefix(src, nds_enc, dst_enc, pre, VEX_OPCODE_0F, false, vector256);
 597   }
 598 
 599   int  vex_prefix_and_encode(int dst_enc, int nds_enc, int src_enc,
 600                              VexSimdPrefix pre, VexOpcode opc,
 601                              bool vex_w, bool vector256);
 602 
 603   int  vex_prefix_and_encode(XMMRegister dst, XMMRegister nds, XMMRegister src,
 604                              VexSimdPrefix pre, bool vector256 = false,
 605                              VexOpcode opc = VEX_OPCODE_0F) {
 606     int src_enc = src->encoding();
 607     int dst_enc = dst->encoding();
 608     int nds_enc = nds->is_valid() ? nds->encoding() : 0;
 609     return vex_prefix_and_encode(dst_enc, nds_enc, src_enc, pre, opc, false, vector256);
 610   }
 611 
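As a reference for the bit-packing these helpers perform, here is a standalone sketch, illustrative rather than the HotSpot implementation, of the two-byte VEX prefix that carries pre (the pp field), the extra source register nds (the vvvv field, stored inverted) and the vector256 length bit (L):

    #include <cstdint>

    // Two-byte VEX: 0xC5 followed by R.vvvv.L.pp. R is the inverted ModRM.reg
    // extension; vvvv is the one's complement of the non-destructive source
    // register; L = 1 selects 256-bit operation; pp encodes the implied SIMD
    // prefix (0 = none, 1 = 66h, 2 = F3h, 3 = F2h).
    static void emit_vex2(uint8_t out[2], bool vex_r, int nds_enc,
                          bool vector256, int pp) {
      out[0] = 0xC5;
      out[1] = (uint8_t)(((vex_r ? 0 : 1) << 7) |
                         ((~nds_enc & 0xF) << 3) |
                         ((vector256 ? 1 : 0) << 2) |
                         (pp & 0x3));
    }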
 612   void simd_prefix(XMMRegister xreg, XMMRegister nds, Address adr,
 613                    VexSimdPrefix pre, VexOpcode opc = VEX_OPCODE_0F,
 614                    bool rex_w = false, bool vector256 = false);
 615 
 616   void simd_prefix(XMMRegister dst, Address src,
 617                    VexSimdPrefix pre, VexOpcode opc = VEX_OPCODE_0F) {
 618     simd_prefix(dst, xnoreg, src, pre, opc);
 619   }
 620   void simd_prefix(Address dst, XMMRegister src, VexSimdPrefix pre) {
 621     simd_prefix(src, dst, pre);
 622   }
 623   void simd_prefix_q(XMMRegister dst, XMMRegister nds, Address src,
 624                      VexSimdPrefix pre) {
 625     bool rex_w = true;
 626     simd_prefix(dst, nds, src, pre, VEX_OPCODE_0F, rex_w);
 627   }
 628 
 629 


1248         // orderAccess code.
1249         lock();
1250         addl(Address(rsp, 0), 0);// Assert the lock# signal here
1251       }
1252     }
1253   }
1254 
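The lock(); addl(Address(rsp, 0), 0) pair above is the classic x86 full-fence idiom: any LOCK-prefixed read-modify-write is a full memory barrier, and adding 0 at the top of the stack disturbs no data. A hedged standalone equivalent in GCC-style inline assembly, for illustration only:

    static inline void locked_add_fence() {
    #if defined(__x86_64__)
      __asm__ __volatile__("lock; addl $0, (%%rsp)" ::: "memory", "cc");
    #elif defined(__i386__)
      __asm__ __volatile__("lock; addl $0, (%%esp)" ::: "memory", "cc");
    #endif
    }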
1255   void mfence();
1256 
1257   // Moves
1258 
1259   void mov64(Register dst, int64_t imm64);
1260 
1261   void movb(Address dst, Register src);
1262   void movb(Address dst, int imm8);
1263   void movb(Register dst, Address src);
1264 
1265   void movdl(XMMRegister dst, Register src);
1266   void movdl(Register dst, XMMRegister src);
1267   void movdl(XMMRegister dst, Address src);
1268   void movdl(Address dst, XMMRegister src);
1269 
1270   // Move Double Quadword
1271   void movdq(XMMRegister dst, Register src);
1272   void movdq(Register dst, XMMRegister src);
1273 
1274   // Move Aligned Double Quadword
1275   void movdqa(XMMRegister dst, XMMRegister src);
1276 
1277   // Move Unaligned Double Quadword
1278   void movdqu(Address     dst, XMMRegister src);
1279   void movdqu(XMMRegister dst, Address src);
1280   void movdqu(XMMRegister dst, XMMRegister src);
1281 
1282   // Move Unaligned 256-bit Vector
1283   void vmovdqu(Address dst, XMMRegister src);
1284   void vmovdqu(XMMRegister dst, Address src);
1285   void vmovdqu(XMMRegister dst, XMMRegister src);
1286 
1287   // Move lower 64 bits to upper 64 bits of a 128-bit register
1288   void movlhps(XMMRegister dst, XMMRegister src);
1289 
1290   void movl(Register dst, int32_t imm32);
1291   void movl(Address dst, int32_t imm32);
1292   void movl(Register dst, Register src);
1293   void movl(Register dst, Address src);
1294   void movl(Address dst, Register src);
1295 
1296   // These dummy overloads prevent movl from converting a zero (like NULL) into a
1297   // Register by giving the compiler two choices it can't resolve.
1298 
1299   void movl(Address  dst, void* junk);
1300   void movl(Register dst, void* junk);
1301 
1302 #ifdef _LP64
1303   void movq(Register dst, Register src);
1304   void movq(Register dst, Address src);
1305   void movq(Address  dst, Register src);
1306 #endif
1307 
1308   void movq(Address     dst, MMXRegister src );
1309   void movq(MMXRegister dst, Address src );


1611   void vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src);
1612   void vaddss(XMMRegister dst, XMMRegister nds, Address src);
1613   void vaddss(XMMRegister dst, XMMRegister nds, XMMRegister src);
1614   void vandpd(XMMRegister dst, XMMRegister nds, Address src);
1615   void vandps(XMMRegister dst, XMMRegister nds, Address src);
1616   void vdivsd(XMMRegister dst, XMMRegister nds, Address src);
1617   void vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src);
1618   void vdivss(XMMRegister dst, XMMRegister nds, Address src);
1619   void vdivss(XMMRegister dst, XMMRegister nds, XMMRegister src);
1620   void vmulsd(XMMRegister dst, XMMRegister nds, Address src);
1621   void vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister src);
1622   void vmulss(XMMRegister dst, XMMRegister nds, Address src);
1623   void vmulss(XMMRegister dst, XMMRegister nds, XMMRegister src);
1624   void vsubsd(XMMRegister dst, XMMRegister nds, Address src);
1625   void vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src);
1626   void vsubss(XMMRegister dst, XMMRegister nds, Address src);
1627   void vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src);
1628   void vxorpd(XMMRegister dst, XMMRegister nds, Address src);
1629   void vxorps(XMMRegister dst, XMMRegister nds, Address src);
1630 
1631   // AVX vector instructions.
1632   void vxorpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
1633   void vxorps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
1634   void vinsertf128h(XMMRegister dst, XMMRegister nds, XMMRegister src);
1635 
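In all of the v* forms above, nds is the non-destructive source: the VEX encoding carries it in vvvv, so dst = nds op src leaves both inputs intact, unlike the two-operand SSE forms that overwrite their first operand. A small intrinsics sketch of the effect, assuming compilation with -mavx (hypothetical example, not HotSpot code):

    #include <immintrin.h>

    // Under -mavx the compiler emits the three-operand vxorps, so both inputs
    // survive; the legacy two-operand xorps would have clobbered nds.
    __m128 xor_keep_sources(__m128 nds, __m128 src) {
      return _mm_xor_ps(nds, src);
    }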
1636   // AVX instruction used to clear the upper 128 bits of the YMM registers and
1637   // to avoid the transition penalty between AVX and SSE states. There is no
1638   // penalty if legacy SSE instructions are encoded using the VEX prefix because
1639   // they always clear the upper 128 bits. It should be used before calling
1640   // runtime code and native libraries.
1641   void vzeroupper();
1642 
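A sketch of the calling pattern the comment describes, using compiler intrinsics rather than HotSpot code; the callee name is hypothetical:

    #include <immintrin.h>

    // Clear the upper halves of the YMM registers before entering code built
    // for legacy SSE, so no AVX/SSE state-transition penalty is paid.
    void call_into_sse_library(void (*sse_fn)(void)) {
      _mm256_zeroupper();   // emits vzeroupper
      sse_fn();             // e.g. runtime code or a native library
    }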
1643  protected:
1644   // The following instructions require 16-byte address alignment in SSE mode.
1645   // They should be called only from the corresponding MacroAssembler instructions.
1646   void andpd(XMMRegister dst, Address src);
1647   void andps(XMMRegister dst, Address src);
1648   void xorpd(XMMRegister dst, Address src);
1649   void xorps(XMMRegister dst, Address src);
1650 
1651 };
1652 
1653 
1654 // MacroAssembler extends Assembler by frequently used macros.
1655 //
1656 // Instructions for which a 'better' code sequence exists depending
1657 // on arguments should also go in here.
1658 
1659 class MacroAssembler: public Assembler {
1660   friend class LIR_Assembler;
1661   friend class Runtime1;      // as_Address()
1662 


2534   void vdivss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vdivss(dst, nds, src); }
2535   void vdivss(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vdivss(dst, nds, src); }
2536   void vdivss(XMMRegister dst, XMMRegister nds, AddressLiteral src);
2537 
2538   void vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vmulsd(dst, nds, src); }
2539   void vmulsd(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vmulsd(dst, nds, src); }
2540   void vmulsd(XMMRegister dst, XMMRegister nds, AddressLiteral src);
2541 
2542   void vmulss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vmulss(dst, nds, src); }
2543   void vmulss(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vmulss(dst, nds, src); }
2544   void vmulss(XMMRegister dst, XMMRegister nds, AddressLiteral src);
2545 
2546   void vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vsubsd(dst, nds, src); }
2547   void vsubsd(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vsubsd(dst, nds, src); }
2548   void vsubsd(XMMRegister dst, XMMRegister nds, AddressLiteral src);
2549 
2550   void vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vsubss(dst, nds, src); }
2551   void vsubss(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vsubss(dst, nds, src); }
2552   void vsubss(XMMRegister dst, XMMRegister nds, AddressLiteral src);
2553 
2554   // AVX Vector instructions
2555 
2556   void vxorpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) { Assembler::vxorpd(dst, nds, src, vector256); }
2557   void vxorpd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vxorpd(dst, nds, src); }
2558   void vxorpd(XMMRegister dst, XMMRegister nds, AddressLiteral src);
2559 
2560   void vxorps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) { Assembler::vxorps(dst, nds, src, vector256); }
2561   void vxorps(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vxorps(dst, nds, src); }
2562   void vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src);
2563 
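The AddressLiteral overloads are declared here and defined out of line. A plausible body, hedged because the .cpp side is not part of this diff, follows the reachability pattern HotSpot uses for other AddressLiteral operands: on x86-64 a literal address can fall outside RIP-relative range, in which case it is first materialized in a scratch register:

    void MacroAssembler::vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
      if (reachable(src)) {
        Assembler::vxorps(dst, nds, as_Address(src));   // RIP-relative reach
      } else {
        lea(rscratch1, src);                            // materialize address
        Assembler::vxorps(dst, nds, Address(rscratch1, 0));
      }
    }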
2564 
2565   // Data
2566 
2567   void cmov32( Condition cc, Register dst, Address  src);
2568   void cmov32( Condition cc, Register dst, Register src);
2569 
2570   void cmov(   Condition cc, Register dst, Register src) { cmovptr(cc, dst, src); }
2571 
2572   void cmovptr(Condition cc, Register dst, Address  src) { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmov32(cc, dst, src)); }
2573   void cmovptr(Condition cc, Register dst, Register src) { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmov32(cc, dst, src)); }
2574 
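For reference, the LP64_ONLY and NOT_LP64 selectors used above expand to their argument on exactly one word size; simplified from their definitions in HotSpot's globalDefinitions.hpp:

    #ifdef _LP64
      #define LP64_ONLY(code) code
      #define NOT_LP64(code)
    #else
      #define LP64_ONLY(code)
      #define NOT_LP64(code) code
    #endif

So cmovptr statically selects cmovq on 64-bit builds and cmov32 on 32-bit builds.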
2575   void movoop(Register dst, jobject obj);
2576   void movoop(Address dst, jobject obj);
2577 
2578   void movptr(ArrayAddress dst, Register src);
2579   // can this do an lea?
2580   void movptr(Register dst, ArrayAddress src);

