src/cpu/x86/vm/assembler_x86.hpp

--- old/src/cpu/x86/vm/assembler_x86.hpp

1266   void lzcntl(Register dst, Register src);
1267 
1268 #ifdef _LP64
1269   void lzcntq(Register dst, Register src);
1270 #endif
1271 
1272   enum Membar_mask_bits {
1273     StoreStore = 1 << 3,
1274     LoadStore  = 1 << 2,
1275     StoreLoad  = 1 << 1,
1276     LoadLoad   = 1 << 0
1277   };
1278 
1279   // Serializes memory and blows flags
1280   void membar(Membar_mask_bits order_constraint) {
1281     if (os::is_MP()) {
1282       // We only have to handle StoreLoad
1283       if (order_constraint & StoreLoad) {
1284         // All usable chips support "locked" instructions, which suffice
1285         // as barriers and are much faster than the alternative of
1286         // using the cpuid instruction. Here we use a locked add [esp],0,
1287         // which is conveniently otherwise a no-op except for blowing
1288         // flags.
1289         // Any change to this code may need to revisit other places in
1290         // the code where this idiom is used, in particular the
1291         // orderAccess code.
1292         lock();
1293         addl(Address(rsp, 0), 0); // Assert the lock# signal here
1294       }
1295     }
1296   }
1297 
1298   void mfence();
1299 
1300   // Moves
1301 
1302   void mov64(Register dst, int64_t imm64);
1303 
1304   void movb(Address dst, Register src);
1305   void movb(Address dst, int imm8);
1306   void movb(Register dst, Address src);
1307 
1308   void movdl(XMMRegister dst, Register src);
1309   void movdl(Register dst, XMMRegister src);
1310   void movdl(XMMRegister dst, Address src);
1311   void movdl(Address dst, XMMRegister src);
1312 
1313   // Move Double Quadword

+++ new/src/cpu/x86/vm/assembler_x86.hpp

1266   void lzcntl(Register dst, Register src);
1267 
1268 #ifdef _LP64
1269   void lzcntq(Register dst, Register src);
1270 #endif
1271 
1272   enum Membar_mask_bits {
1273     StoreStore = 1 << 3,
1274     LoadStore  = 1 << 2,
1275     StoreLoad  = 1 << 1,
1276     LoadLoad   = 1 << 0
1277   };
1278 
1279   // Serializes memory and blows flags
1280   void membar(Membar_mask_bits order_constraint) {
1281     if (os::is_MP()) {
1282       // We only have to handle StoreLoad
1283       if (order_constraint & StoreLoad) {
1284         // All usable chips support "locked" instructions, which suffice
1285         // as barriers and are much faster than the alternative of
1286         // using the cpuid instruction. Here we use a locked add [esp+C],0,
1287         // which is conveniently otherwise a no-op except for blowing
1288         // flags and introducing a false dependency on the target memory
1289         // location. We can't do anything about the flags, but we can avoid
1290         // memory dependencies in the current method by doing the locked add
1291         // somewhere else on the stack. [esp+C] is convenient since it is
1292         // almost always in the data cache for any small C. A positive C
1293         // would collide with something on the stack in the current method,
1294         // hence we shoot for a small negative C.
1295         //
1296         // Any change to this code may need to revisit other places in
1297         // the code where this idiom is used, in particular the
1298         // orderAccess code.
1299 
1300         // TODO: Once JDK-8049717 is here, use the renamed method.
1301         int cl_size = VM_Version::prefetch_data_size();
1302 
1303         lock();
1304         addl(Address(rsp, -(cl_size + 8)), 0); // Assert the lock# signal here
1305       }
1306     }
1307   }
1308 
1309   void mfence();
1310 
1311   // Moves
1312 
1313   void mov64(Register dst, int64_t imm64);
1314 
1315   void movb(Address dst, Register src);
1316   void movb(Address dst, int imm8);
1317   void movb(Register dst, Address src);
1318 
1319   void movdl(XMMRegister dst, Register src);
1320   void movdl(Register dst, XMMRegister src);
1321   void movdl(XMMRegister dst, Address src);
1322   void movdl(Address dst, XMMRegister src);
1323 
1324   // Move Double Quadword
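
A note on how the Membar_mask_bits constraints compose: the enum values are
single bits, so callers can OR several constraints into one membar() call.
On x86 only StoreLoad costs an instruction (the locked add); the remaining
constraints are already guaranteed by the hardware's memory model, which is
exactly what the StoreLoad-only check in membar() encodes. A minimal caller
sketch, assuming the surrounding HotSpot MacroAssembler plumbing; the helper
name emit_volatile_store_barriers is hypothetical, not part of this file:

    // Hypothetical sketch: the barrier pattern around a volatile store.
    void emit_volatile_store_barriers(MacroAssembler* masm) {
      // Earlier loads and stores must not reorder below the volatile store.
      // Both bits fold to nothing in membar() above: x86 already orders them.
      masm->membar(Assembler::Membar_mask_bits(Assembler::LoadStore |
                                               Assembler::StoreStore));

      // ... the volatile store itself would be emitted here ...

      // The store must be visible before any later load: the one case
      // membar() pays for, via lock(); addl(Address(rsp, -(cl_size + 8)), 0).
      masm->membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
    }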
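
To make the new locked-add target concrete, here is a rough standalone
rendering of the instruction the assembler now emits, as GCC/Clang inline
asm. The assumptions are mine, not the webrev's: x86-64, and a 64-byte line
size from VM_Version::prefetch_data_size(), so -(cl_size + 8) becomes -72.
An illustration of the idiom, not HotSpot code:

    // StoreLoad fence via a dummy locked add below the stack pointer.
    // "addl $0" writes back an unchanged value, so memory contents are
    // untouched; the instruction only serializes memory and clobbers flags.
    // Hitting [rsp - 72] instead of [rsp] keeps the locked read-modify-write
    // off the slots the current frame actively uses, avoiding the false
    // store-to-load dependency described in the comment above. (In ordinary
    // C/C++ the offset can fall in the SysV red zone; that is still harmless
    // here precisely because the stored value is unchanged.)
    static inline void storeload_fence() {
      __asm__ volatile("lock; addl $0, -72(%%rsp)" ::: "cc", "memory");
    }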