src/cpu/x86/vm/assembler_x86.hpp

 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_X86_VM_ASSEMBLER_X86_HPP
#define CPU_X86_VM_ASSEMBLER_X86_HPP

#include "asm/register.hpp"
#include "vm_version_x86.hpp"

class BiasedLockingCounters;

// Contains all the definitions needed for x86 assembly code generation.

// Calling convention
class Argument VALUE_OBJ_CLASS_SPEC {
 public:
  enum {
#ifdef _LP64
#ifdef _WIN64
    n_int_register_parameters_c   = 4, // rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...)
    n_float_register_parameters_c = 4, // xmm0 - xmm3 (c_farg0, c_farg1, ...)
#else
    n_int_register_parameters_c   = 6, // rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...)
    n_float_register_parameters_c = 8, // xmm0 - xmm7 (c_farg0, c_farg1, ...)
#endif // _WIN64
    n_int_register_parameters_j   = 6, // j_rarg0, j_rarg1, ...
    n_float_register_parameters_j = 8  // j_farg0, j_farg1, ...
#else


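The Argument constants above record how many arguments each calling convention passes in registers before spilling to the stack; the comments give the register order. As a standalone illustration of the SysV (non-Win64) integer sequence, here is a minimal sketch (the helper and program are hypothetical, not HotSpot code):

#include <cstdio>

// Registers for the first six C integer arguments on SysV x86-64,
// in the same order as the comment on n_int_register_parameters_c.
static const char* sysv_int_arg_register(int index) {
  static const char* const regs[6] = { "rdi", "rsi", "rdx", "rcx", "r8", "r9" };
  return (index >= 0 && index < 6) ? regs[index] : "stack";
}

int main() {
  for (int i = 0; i < 8; i++) {
    std::printf("c_rarg%d -> %s\n", i, sysv_int_arg_register(i));
  }
  return 0;
}
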
  void lzcntl(Register dst, Register src);

#ifdef _LP64
  void lzcntq(Register dst, Register src);
#endif

  enum Membar_mask_bits {
    StoreStore = 1 << 3,
    LoadStore  = 1 << 2,
    StoreLoad  = 1 << 1,
    LoadLoad   = 1 << 0
  };
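
  // Each bit names one ordering that must be preserved across the barrier.
  // Call sites build a constraint by OR-ing bits together and casting the
  // result back, since the enum defines no operator|. A hedged illustration
  // of such a combined request (a sketch, not a line quoted from HotSpot;
  // the masm pointer is hypothetical):
  //
  //   masm->membar(Assembler::Membar_mask_bits(Assembler::LoadLoad |
  //                                            Assembler::LoadStore));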

  // Serializes memory and blows flags
  void membar(Membar_mask_bits order_constraint) {
    if (os::is_MP()) {
      // We only have to handle StoreLoad
      if (order_constraint & StoreLoad) {
        // All usable chips support "locked" instructions which suffice
        // as barriers, and are much faster than the alternative of
        // using the cpuid instruction. We use here a locked add [esp-C],0.
        // This is conveniently otherwise a no-op except for blowing
        // flags, and introducing a false dependency on the target memory
        // location. We can't do anything with flags, but we can avoid
        // memory dependencies in the current method by locked-adding
        // somewhere else on the stack. Doing [esp+C] would collide with
        // something on the stack in the current method, hence we go for
        // [esp-C]. That location is convenient since it is almost always
        // in the data cache, for any small C. We need to step back from
        // SP to avoid data dependencies with other things below SP
        // (callee-saves, for example). Without a clear way to figure out
        // the minimal safe distance from SP, it makes sense to step back
        // a complete cache line, as this also avoids possible second-order
        // effects of locked ops against the cache line. Our choice of
        // offset is bounded by x86 operand encoding: it must stay within
        // [-128; +127] to get the compact 8-bit displacement encoding.
        //
        // Any change to this code may need to revisit other places in
        // the code where this idiom is used, in particular the
        // orderAccess code.

        int offset = -VM_Version::L1_line_size();
        if (offset < -128) {
          offset = -128;
        }

        lock();
        addl(Address(rsp, offset), 0); // Assert the lock# signal here
      }
    }
  }
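
  // For reference, the emitted sequence is the same locked-add StoreLoad
  // fence that can be written in GCC-style inline assembly; a minimal
  // sketch assuming x86-64, a GCC/Clang-compatible compiler, and a
  // 64-byte L1 line standing in for -VM_Version::L1_line_size()
  // (analogous to, but not copied from, the orderAccess code noted above):
  //
  //   static inline void storeload_fence() {
  //     __asm__ volatile ("lock; addl $0, -64(%%rsp)"
  //                       : /* no outputs */ : /* no inputs */
  //                       : "cc", "memory");  // blows flags, compiler barrier
  //   }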

  void mfence();

  // Moves

  void mov64(Register dst, int64_t imm64);

  void movb(Address dst, Register src);
  void movb(Address dst, int imm8);
  void movb(Register dst, Address src);

  void movdl(XMMRegister dst, Register src);
  void movdl(Register dst, XMMRegister src);
  void movdl(XMMRegister dst, Address src);
  void movdl(Address dst, XMMRegister src);

  // Move Double Quadword