src/cpu/x86/vm/macroAssembler_x86.hpp

rev 5968 : 8031320: Use Intel RTM instructions for locks
Summary: Use RTM for inflated locks and stack locks.
Reviewed-by: iveresov, twisti, roland, dcubed


  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef CPU_X86_VM_MACROASSEMBLER_X86_HPP
  26 #define CPU_X86_VM_MACROASSEMBLER_X86_HPP
  27 
  28 #include "asm/assembler.hpp"
  29 #include "utilities/macros.hpp"

  30 
  31 
  32 // MacroAssembler extends Assembler by frequently used macros.
  33 //
  34 // Instructions for which a 'better' code sequence exists depending
  35 // on arguments should also go in here.
  36 
  37 class MacroAssembler: public Assembler {
  38   friend class LIR_Assembler;
  39   friend class Runtime1;      // as_Address()
  40 
  41  protected:
  42 
  43   Address as_Address(AddressLiteral adr);
  44   Address as_Address(ArrayAddress adr);
  45 
  46   // Support for VM calls
  47   //
  48   // This is the base routine called by the different versions of call_VM_leaf. The interpreter
  49   // may customize this version by overriding it for its purposes (e.g., to save/restore


  94   MacroAssembler(CodeBuffer* code) : Assembler(code) {}
  95 
  96   // Support for NULL-checks
  97   //
  98   // Generates code that causes a NULL OS exception if the content of reg is NULL.
  99   // If the accessed location is M[reg + offset] and the offset is known, provide the
 100   // offset. No explicit code generation is needed if the offset is within a certain
 101   // range (0 <= offset <= page_size).
 102 
 103   void null_check(Register reg, int offset = -1);
 104   static bool needs_explicit_null_check(intptr_t offset);
 105 
 106   // Required platform-specific helpers for Label::patch_instructions.
 107   // They _shadow_ the declarations in AbstractAssembler, which are undefined.
 108   void pd_patch_instruction(address branch, address target) {
 109     unsigned char op = branch[0];
 110     assert(op == 0xE8 /* call */ ||
 111         op == 0xE9 /* jmp */ ||
 112         op == 0xEB /* short jmp */ ||
 113         (op & 0xF0) == 0x70 /* short jcc */ ||
 114         op == 0x0F && (branch[1] & 0xF0) == 0x80 /* jcc */,
 115         "Invalid opcode at patch point");
 116 
 117     if (op == 0xEB || (op & 0xF0) == 0x70) {
 118       // short offset operators (jmp and jcc)
 119       char* disp = (char*) &branch[1];
 120       int imm8 = target - (address) &disp[1];
 121       guarantee(this->is8bit(imm8), "Short forward jump exceeds 8-bit offset");
 122       *disp = imm8;
 123     } else {
 124       int* disp = (int*) &branch[(op == 0x0F)? 2: 1];
 125       int imm32 = target - (address) &disp[1];
 126       *disp = imm32;
 127     }
 128   }
 129 
 130   // The following 4 methods return the offset of the appropriate move instruction
 131 
 132   // Support for fast byte/short loading with zero extension (depending on particular CPU)
 133   int load_unsigned_byte(Register dst, Address src);
 134   int load_unsigned_short(Register dst, Address src);
 135 
 136   // Support for fast byte/short loading with sign extension (depending on particular CPU)
 137   int load_signed_byte(Register dst, Address src);
 138   int load_signed_short(Register dst, Address src);
 139 
 140   // Support for sign-extension (hi:lo = extend_sign(lo))
 141   void extend_sign(Register hi, Register lo);
 142 
 143   // Load and store values by size and signed-ness
 144   void load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2 = noreg);
 145   void store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2 = noreg);
 146 
 147   // Support for inc/dec with optimal instruction selection depending on value
 148 
 149   void increment(Register reg, int value = 1) { LP64_ONLY(incrementq(reg, value)) NOT_LP64(incrementl(reg, value)) ; }
 150   void decrement(Register reg, int value = 1) { LP64_ONLY(decrementq(reg, value)) NOT_LP64(decrementl(reg, value)) ; }
 151 
 152   void decrementl(Address dst, int value = 1);
 153   void decrementl(Register reg, int value = 1);
 154 
 155   void decrementq(Register reg, int value = 1);
 156   void decrementq(Address dst, int value = 1);
 157 
 158   void incrementl(Address dst, int value = 1);
 159   void incrementl(Register reg, int value = 1);
 160 
 161   void incrementq(Register reg, int value = 1);
 162   void incrementq(Address dst, int value = 1);
 163 
 164 
 165   // Support optimal SSE move instructions.
 166   void movflt(XMMRegister dst, XMMRegister src) {
 167     if (UseXmmRegToRegMoveAll) { movaps(dst, src); return; }
 168     else                       { movss (dst, src); return; }
 169   }
 170   void movflt(XMMRegister dst, Address src) { movss(dst, src); }
 171   void movflt(XMMRegister dst, AddressLiteral src);
 172   void movflt(Address dst, XMMRegister src) { movss(dst, src); }
 173 
 174   void movdbl(XMMRegister dst, XMMRegister src) {
 175     if (UseXmmRegToRegMoveAll) { movapd(dst, src); return; }
 176     else                       { movsd (dst, src); return; }
 177   }
 178 
 179   void movdbl(XMMRegister dst, AddressLiteral src);
 180 
 181   void movdbl(XMMRegister dst, Address src) {
 182     if (UseXmmLoadAndClearUpper) { movsd (dst, src); return; }
 183     else                         { movlpd(dst, src); return; }
 184   }
 185   void movdbl(Address dst, XMMRegister src) { movsd(dst, src); }
 186 
 187   void incrementl(AddressLiteral dst);
 188   void incrementl(ArrayAddress dst);
 189 
 190   // Alignment
 191   void align(int modulus);
 192 
 193   // A 5 byte nop that is safe for patching (see patch_verified_entry)
 194   void fat_nop();
 195 
 196   // Stack frame creation/removal
 197   void enter();
 198   void leave();
 199 
  200   // Support for getting the JavaThread pointer (i.e., a reference to thread-local information)
 201   // The pointer will be loaded into the thread register.
 202   void get_thread(Register thread);
 203 
 204 
 205   // Support for VM calls
 206   //
 207   // It is imperative that all calls into the VM are handled via the call_VM macros.
  208   // They make sure that the stack linkage is set up correctly. call_VM's correspond
 209   // to ENTRY/ENTRY_X entry points while call_VM_leaf's correspond to LEAF entry points.


 637   // lock_reg and obj_reg must be loaded up with the appropriate values.
 638   // swap_reg must be rax, and is killed.
 639   // tmp_reg is optional. If it is supplied (i.e., != noreg) it will
 640   // be killed; if not supplied, push/pop will be used internally to
 641   // allocate a temporary (inefficient, avoid if possible).
 642   // Optional slow case is for implementations (interpreter and C1) which branch to
 643   // slow case directly. Leaves condition codes set for C2's Fast_Lock node.
 644   // Returns offset of first potentially-faulting instruction for null
 645   // check info (currently consumed only by C1). If
 646   // swap_reg_contains_mark is true then returns -1 as it is assumed
 647   // the calling code has already passed any potential faults.
 648   int biased_locking_enter(Register lock_reg, Register obj_reg,
 649                            Register swap_reg, Register tmp_reg,
 650                            bool swap_reg_contains_mark,
 651                            Label& done, Label* slow_case = NULL,
 652                            BiasedLockingCounters* counters = NULL);
 653   void biased_locking_exit (Register obj_reg, Register temp_reg, Label& done);
 654 #ifdef COMPILER2
 655   // Code used by cmpFastLock and cmpFastUnlock mach instructions in .ad file.
  656   // See full description in macroAssembler_x86.cpp.
 657   void fast_lock(Register obj, Register box, Register tmp, Register scr, BiasedLockingCounters* counters);
 658   void fast_unlock(Register obj, Register box, Register tmp);
 659 #endif
 660 
 661   Condition negate_condition(Condition cond);
 662 
  663   // Instructions that use AddressLiteral operands. These instructions can handle 32-bit/64-bit
  664   // operands. In general the names are modified to avoid hiding the instruction in Assembler
  665   // so that we don't need to implement all the varieties in the Assembler with trivial wrappers
  666   // here in MacroAssembler. The major exception to this rule is call.
 667 
 668   // Arithmetics
 669 
 670 
 671   void addptr(Address dst, int32_t src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)) ; }
 672   void addptr(Address dst, Register src);
 673 
 674   void addptr(Register dst, Address src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)); }
 675   void addptr(Register dst, int32_t src);
 676   void addptr(Register dst, Register src);
 677   void addptr(Register dst, RegisterOrConstant src) {
 678     if (src.is_constant()) addptr(dst, (int) src.as_constant());


 704   void cmpptr(Address src1, AddressLiteral src2);
 705 
 706   void cmpptr(Register src1, AddressLiteral src2);
 707 
 708   void cmpptr(Register src1, Register src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
 709   void cmpptr(Register src1, Address src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
 710   // void cmpptr(Address src1, Register src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
 711 
 712   void cmpptr(Register src1, int32_t src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
 713   void cmpptr(Address src1, int32_t src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
 714 
  715   // cmp64 to avoid hiding cmpq
 716   void cmp64(Register src1, AddressLiteral src);
 717 
 718   void cmpxchgptr(Register reg, Address adr);
 719 
 720   void locked_cmpxchgptr(Register reg, AddressLiteral adr);
 721 
 722 
 723   void imulptr(Register dst, Register src) { LP64_ONLY(imulq(dst, src)) NOT_LP64(imull(dst, src)); }
 724 
 725 
 726   void negptr(Register dst) { LP64_ONLY(negq(dst)) NOT_LP64(negl(dst)); }
 727 
 728   void notptr(Register dst) { LP64_ONLY(notq(dst)) NOT_LP64(notl(dst)); }
 729 
 730   void shlptr(Register dst, int32_t shift);
 731   void shlptr(Register dst) { LP64_ONLY(shlq(dst)) NOT_LP64(shll(dst)); }
 732 
 733   void shrptr(Register dst, int32_t shift);
 734   void shrptr(Register dst) { LP64_ONLY(shrq(dst)) NOT_LP64(shrl(dst)); }
 735 
 736   void sarptr(Register dst) { LP64_ONLY(sarq(dst)) NOT_LP64(sarl(dst)); }
 737   void sarptr(Register dst, int32_t src) { LP64_ONLY(sarq(dst, src)) NOT_LP64(sarl(dst, src)); }
 738 
 739   void subptr(Address dst, int32_t src) { LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src)); }
 740 
 741   void subptr(Register dst, Address src) { LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src)); }
 742   void subptr(Register dst, int32_t src);
  743   // Force generation of a 4-byte immediate value even if it fits into 8 bits


 745   void subptr(Register dst, Register src);
 746   void subptr(Register dst, RegisterOrConstant src) {
 747     if (src.is_constant()) subptr(dst, (int) src.as_constant());
 748     else                   subptr(dst,       src.as_register());
 749   }
 750 
 751   void sbbptr(Address dst, int32_t src) { LP64_ONLY(sbbq(dst, src)) NOT_LP64(sbbl(dst, src)); }
 752   void sbbptr(Register dst, int32_t src) { LP64_ONLY(sbbq(dst, src)) NOT_LP64(sbbl(dst, src)); }
 753 
 754   void xchgptr(Register src1, Register src2) { LP64_ONLY(xchgq(src1, src2)) NOT_LP64(xchgl(src1, src2)) ; }
 755   void xchgptr(Register src1, Address src2) { LP64_ONLY(xchgq(src1, src2)) NOT_LP64(xchgl(src1, src2)) ; }
 756 
 757   void xaddptr(Address src1, Register src2) { LP64_ONLY(xaddq(src1, src2)) NOT_LP64(xaddl(src1, src2)) ; }
 758 
 759 
 760 
 761   // Helper functions for statistics gathering.
 762   // Conditionally (atomically, on MPs) increments passed counter address, preserving condition codes.
 763   void cond_inc32(Condition cond, AddressLiteral counter_addr);
 764   // Unconditional atomic increment.
 765   void atomic_incl(AddressLiteral counter_addr);
 766 
 767   void lea(Register dst, AddressLiteral adr);
 768   void lea(Address dst, AddressLiteral adr);
 769   void lea(Register dst, Address adr) { Assembler::lea(dst, adr); }
 770 
 771   void leal32(Register dst, Address src) { leal(dst, src); }
 772 
 773   // Import other testl() methods from the parent class or else
 774   // they will be hidden by the following overriding declaration.
 775   using Assembler::testl;
 776   void testl(Register dst, AddressLiteral src);
 777 
 778   void orptr(Register dst, Address src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
 779   void orptr(Register dst, Register src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
 780   void orptr(Register dst, int32_t src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
 781   void orptr(Address dst, int32_t imm32) { LP64_ONLY(orq(dst, imm32)) NOT_LP64(orl(dst, imm32)); }
 782 
 783   void testptr(Register src, int32_t imm32) {  LP64_ONLY(testq(src, imm32)) NOT_LP64(testl(src, imm32)); }
 784   void testptr(Register src1, Register src2);
 785 


1057   void cmov32( Condition cc, Register dst, Address  src);
1058   void cmov32( Condition cc, Register dst, Register src);
1059 
1060   void cmov(   Condition cc, Register dst, Register src) { cmovptr(cc, dst, src); }
1061 
1062   void cmovptr(Condition cc, Register dst, Address  src) { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmov32(cc, dst, src)); }
1063   void cmovptr(Condition cc, Register dst, Register src) { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmov32(cc, dst, src)); }
1064 
1065   void movoop(Register dst, jobject obj);
1066   void movoop(Address dst, jobject obj);
1067 
1068   void mov_metadata(Register dst, Metadata* obj);
1069   void mov_metadata(Address dst, Metadata* obj);
1070 
1071   void movptr(ArrayAddress dst, Register src);
1072   // can this do an lea?
1073   void movptr(Register dst, ArrayAddress src);
1074 
1075   void movptr(Register dst, Address src);
1076 
1077   void movptr(Register dst, AddressLiteral src);
1078 
1079   void movptr(Register dst, intptr_t src);
1080   void movptr(Register dst, Register src);
1081   void movptr(Address dst, intptr_t src);
1082 
1083   void movptr(Address dst, Register src);
1084 
1085   void movptr(Register dst, RegisterOrConstant src) {
1086     if (src.is_constant()) movptr(dst, src.as_constant());
1087     else                   movptr(dst, src.as_register());
1088   }
1089 
1090 #ifdef _LP64
1091   // Generally the next two are only used for moving NULL
1092   // Although there are situations in initializing the mark word where
1093   // they could be used. They are dangerous.
1094 
1095   // They only exist on LP64 so that int32_t and intptr_t are not the same
1096   // and we have ambiguous declarations.
1097 




  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef CPU_X86_VM_MACROASSEMBLER_X86_HPP
  26 #define CPU_X86_VM_MACROASSEMBLER_X86_HPP
  27 
  28 #include "asm/assembler.hpp"
  29 #include "utilities/macros.hpp"
  30 #include "runtime/rtmLocking.hpp"
  31 
  32 
  33 // MacroAssembler extends Assembler by frequently used macros.
  34 //
  35 // Instructions for which a 'better' code sequence exists depending
  36 // on arguments should also go in here.
  37 
  38 class MacroAssembler: public Assembler {
  39   friend class LIR_Assembler;
  40   friend class Runtime1;      // as_Address()
  41 
  42  protected:
  43 
  44   Address as_Address(AddressLiteral adr);
  45   Address as_Address(ArrayAddress adr);
  46 
  47   // Support for VM calls
  48   //
  49   // This is the base routine called by the different versions of call_VM_leaf. The interpreter
  50   // may customize this version by overriding it for its purposes (e.g., to save/restore


  95   MacroAssembler(CodeBuffer* code) : Assembler(code) {}
  96 
  97   // Support for NULL-checks
  98   //
  99   // Generates code that causes a NULL OS exception if the content of reg is NULL.
 100   // If the accessed location is M[reg + offset] and the offset is known, provide the
 101   // offset. No explicit code generation is needed if the offset is within a certain
 102   // range (0 <= offset <= page_size).
 103 
 104   void null_check(Register reg, int offset = -1);
 105   static bool needs_explicit_null_check(intptr_t offset);
 106 
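The range in the comment above is the usual implicit-null-check argument: an access at M[reg + offset] with a small non-negative offset still lands in the protected page at address zero when reg is NULL, so the OS trap replaces an explicit compare. A minimal stand-in for that predicate, assuming only a page-granular guard at address zero (illustrative; the real needs_explicit_null_check() has additional platform-specific cases):

    #include <cstdint>

    // An access at [base + offset] is covered by the trap on the unmapped
    // first page only while the offset stays inside that page; anything
    // else needs explicit code (cf. null_check()/needs_explicit_null_check()).
    static bool needs_explicit_null_check_sketch(intptr_t offset, intptr_t page_size) {
      return offset < 0 || offset >= page_size;
    }
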
 107   // Required platform-specific helpers for Label::patch_instructions.
 108   // They _shadow_ the declarations in AbstractAssembler, which are undefined.
 109   void pd_patch_instruction(address branch, address target) {
 110     unsigned char op = branch[0];
 111     assert(op == 0xE8 /* call */ ||
 112         op == 0xE9 /* jmp */ ||
 113         op == 0xEB /* short jmp */ ||
 114         (op & 0xF0) == 0x70 /* short jcc */ ||
 115         op == 0x0F && (branch[1] & 0xF0) == 0x80 /* jcc */ ||
 116         op == 0xC7 && branch[1] == 0xF8 /* xbegin */,
 117         "Invalid opcode at patch point");
 118 
 119     if (op == 0xEB || (op & 0xF0) == 0x70) {
 120       // short offset operators (jmp and jcc)
 121       char* disp = (char*) &branch[1];
 122       int imm8 = target - (address) &disp[1];
 123       guarantee(this->is8bit(imm8), "Short forward jump exceeds 8-bit offset");
 124       *disp = imm8;
 125     } else {
 126       int* disp = (int*) &branch[(op == 0x0F || op == 0xC7)? 2: 1];
 127       int imm32 = target - (address) &disp[1];
 128       *disp = imm32;
 129     }
 130   }
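The added 0xC7/0xF8 arm recognizes xbegin, which this change starts emitting for RTM locks. Like the two-byte 0F 8x jcc forms, xbegin is a two-byte opcode (C7 F8) followed by a 32-bit displacement relative to the end of the instruction, which is why the 32-bit branch of the patch code now skips two opcode bytes for op == 0xC7 as well. The same displacement arithmetic in isolation, as a sketch (patch_xbegin is not a HotSpot function, just an illustration):

    #include <cstdint>
    #include <cstring>

    // Patch the rel32 of an 'xbegin' (C7 F8 imm32) at 'branch' to reach 'target'.
    // The displacement is measured from the end of the 6-byte instruction,
    // which is what '&disp[1]' computes in pd_patch_instruction above.
    static void patch_xbegin(unsigned char* branch, unsigned char* target) {
      unsigned char* disp = branch + 2;                // skip the C7 F8 opcode bytes
      int32_t imm32 = (int32_t)(target - (disp + 4));  // rel32 from the next instruction
      std::memcpy(disp, &imm32, sizeof(imm32));        // byte-wise store avoids alignment issues
    }
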
 131 
 132   // The following 4 methods return the offset of the appropriate move instruction
 133 
 134   // Support for fast byte/short loading with zero extension (depending on particular CPU)
 135   int load_unsigned_byte(Register dst, Address src);
 136   int load_unsigned_short(Register dst, Address src);
 137 
 138   // Support for fast byte/short loading with sign extension (depending on particular CPU)
 139   int load_signed_byte(Register dst, Address src);
 140   int load_signed_short(Register dst, Address src);
 141 
 142   // Support for sign-extension (hi:lo = extend_sign(lo))
 143   void extend_sign(Register hi, Register lo);
 144 
 145   // Load and store values by size and signed-ness
 146   void load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2 = noreg);
 147   void store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2 = noreg);
 148 
 149   // Support for inc/dec with optimal instruction selection depending on value
 150 
 151   void increment(Register reg, int value = 1) { LP64_ONLY(incrementq(reg, value)) NOT_LP64(incrementl(reg, value)) ; }
 152   void decrement(Register reg, int value = 1) { LP64_ONLY(decrementq(reg, value)) NOT_LP64(decrementl(reg, value)) ; }
 153 
 154   void decrementl(Address dst, int value = 1);
 155   void decrementl(Register reg, int value = 1);
 156 
 157   void decrementq(Register reg, int value = 1);
 158   void decrementq(Address dst, int value = 1);
 159 
 160   void incrementl(Address dst, int value = 1);
 161   void incrementl(Register reg, int value = 1);
 162 
 163   void incrementq(Register reg, int value = 1);
 164   void incrementq(Address dst, int value = 1);
 165 

 166   // Support optimal SSE move instructions.
 167   void movflt(XMMRegister dst, XMMRegister src) {
 168     if (UseXmmRegToRegMoveAll) { movaps(dst, src); return; }
 169     else                       { movss (dst, src); return; }
 170   }
 171   void movflt(XMMRegister dst, Address src) { movss(dst, src); }
 172   void movflt(XMMRegister dst, AddressLiteral src);
 173   void movflt(Address dst, XMMRegister src) { movss(dst, src); }
 174 
 175   void movdbl(XMMRegister dst, XMMRegister src) {
 176     if (UseXmmRegToRegMoveAll) { movapd(dst, src); return; }
 177     else                       { movsd (dst, src); return; }
 178   }
 179 
 180   void movdbl(XMMRegister dst, AddressLiteral src);
 181 
 182   void movdbl(XMMRegister dst, Address src) {
 183     if (UseXmmLoadAndClearUpper) { movsd (dst, src); return; }
 184     else                         { movlpd(dst, src); return; }
 185   }
 186   void movdbl(Address dst, XMMRegister src) { movsd(dst, src); }
 187 
 188   void incrementl(AddressLiteral dst);
 189   void incrementl(ArrayAddress dst);
 190 
 191   void incrementq(AddressLiteral dst);
 192 
 193   // Alignment
 194   void align(int modulus);
 195 
 196   // A 5 byte nop that is safe for patching (see patch_verified_entry)
 197   void fat_nop();
 198 
 199   // Stack frame creation/removal
 200   void enter();
 201   void leave();
 202 
  203   // Support for getting the JavaThread pointer (i.e., a reference to thread-local information)
 204   // The pointer will be loaded into the thread register.
 205   void get_thread(Register thread);
 206 
 207 
 208   // Support for VM calls
 209   //
 210   // It is imperative that all calls into the VM are handled via the call_VM macros.
 211   // They make sure that the stack linkage is setup correctly. call_VM's correspond
 212   // to ENTRY/ENTRY_X entry points while call_VM_leaf's correspond to LEAF entry points.


 640   // lock_reg and obj_reg must be loaded up with the appropriate values.
 641   // swap_reg must be rax, and is killed.
 642   // tmp_reg is optional. If it is supplied (i.e., != noreg) it will
 643   // be killed; if not supplied, push/pop will be used internally to
 644   // allocate a temporary (inefficient, avoid if possible).
 645   // Optional slow case is for implementations (interpreter and C1) which branch to
 646   // slow case directly. Leaves condition codes set for C2's Fast_Lock node.
 647   // Returns offset of first potentially-faulting instruction for null
 648   // check info (currently consumed only by C1). If
 649   // swap_reg_contains_mark is true then returns -1 as it is assumed
 650   // the calling code has already passed any potential faults.
 651   int biased_locking_enter(Register lock_reg, Register obj_reg,
 652                            Register swap_reg, Register tmp_reg,
 653                            bool swap_reg_contains_mark,
 654                            Label& done, Label* slow_case = NULL,
 655                            BiasedLockingCounters* counters = NULL);
 656   void biased_locking_exit (Register obj_reg, Register temp_reg, Label& done);
 657 #ifdef COMPILER2
 658   // Code used by cmpFastLock and cmpFastUnlock mach instructions in .ad file.
  659   // See full description in macroAssembler_x86.cpp.
 660   void fast_lock(Register obj, Register box, Register tmp,
 661                  Register scr, Register cx1, Register cx2,
 662                  BiasedLockingCounters* counters,
 663                  RTMLockingCounters* rtm_counters,
 664                  RTMLockingCounters* stack_rtm_counters,
 665                  Metadata* method_data,
 666                  bool use_rtm, bool profile_rtm);
 667   void fast_unlock(Register obj, Register box, Register tmp, bool use_rtm);
 668 #if INCLUDE_RTM_OPT
 669   void rtm_counters_update(Register abort_status, Register rtm_counters);
 670   void branch_on_random_using_rdtsc(Register tmp, Register scr, int count, Label& brLabel);
 671   void rtm_abort_ratio_calculation(Register tmp, Register rtm_counters_reg,
 672                                    RTMLockingCounters* rtm_counters,
 673                                    Metadata* method_data);
 674   void rtm_profiling(Register abort_status_Reg, Register rtm_counters_Reg,
 675                      RTMLockingCounters* rtm_counters, Metadata* method_data, bool profile_rtm);
 676   void rtm_retry_lock_on_abort(Register retry_count, Register abort_status, Label& retryLabel);
 677   void rtm_retry_lock_on_busy(Register retry_count, Register box, Register tmp, Register scr, Label& retryLabel);
 678   void rtm_stack_locking(Register obj, Register tmp, Register scr,
 679                          Register retry_on_abort_count,
 680                          RTMLockingCounters* stack_rtm_counters,
 681                          Metadata* method_data, bool profile_rtm,
 682                          Label& DONE_LABEL, Label& IsInflated);
 683   void rtm_inflated_locking(Register obj, Register box, Register tmp,
 684                             Register scr, Register retry_on_busy_count,
 685                             Register retry_on_abort_count,
 686                             RTMLockingCounters* rtm_counters,
 687                             Metadata* method_data, bool profile_rtm,
 688                             Label& DONE_LABEL);
 689 #endif
 690 #endif
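Taken together, the declarations above form the code generator's RTM fast path: rtm_stack_locking/rtm_inflated_locking try to run the monitor region as a hardware transaction, rtm_retry_lock_on_abort/rtm_retry_lock_on_busy bound the number of retries, and rtm_counters_update/rtm_abort_ratio_calculation/rtm_profiling maintain the abort statistics (RTMLockingCounters, optionally per method via method_data) that decide whether elision stays enabled. The extra fast_lock parameters (cx1/cx2 scratch registers, the counter pointers, use_rtm/profile_rtm) feed exactly those pieces. The overall shape is classic lock elision; a conceptual C++ sketch using the Intel RTM intrinsics, not the emitted assembly (which works on object mark words), requiring RTM-capable hardware and -mrtm to build:

    #include <immintrin.h>   // _xbegin/_xend/_xabort
    #include <atomic>

    static std::atomic<int> fallback_lock{0};   // stand-in for the real monitor

    template <typename Body>
    void elided_lock(Body body, int max_abort_retries = 5) {
      for (int attempt = 0; attempt < max_abort_retries; ++attempt) {
        unsigned status = _xbegin();
        if (status == _XBEGIN_STARTED) {
          // Read the lock word inside the transaction: if another thread
          // later takes it for real, this transaction aborts automatically.
          if (fallback_lock.load(std::memory_order_relaxed) != 0)
            _xabort(0xff);                      // busy: abort, maybe retry
          body();                               // critical section, run speculatively
          _xend();
          return;                               // committed without any atomic RMW
        }
        // 'status' carries the abort cause, the analogue of the abort_status
        // register consumed by rtm_counters_update/rtm_retry_lock_on_abort.
      }
      // Fall back to an ordinary lock, as the generated code falls back to CAS.
      while (fallback_lock.exchange(1, std::memory_order_acquire) != 0) { /* spin */ }
      body();
      fallback_lock.store(0, std::memory_order_release);
    }
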
 691 
 692   Condition negate_condition(Condition cond);
 693 
  694   // Instructions that use AddressLiteral operands. These instructions can handle 32-bit/64-bit
  695   // operands. In general the names are modified to avoid hiding the instruction in Assembler
  696   // so that we don't need to implement all the varieties in the Assembler with trivial wrappers
  697   // here in MacroAssembler. The major exception to this rule is call.
 698 
 699   // Arithmetics
 700 
 701 
 702   void addptr(Address dst, int32_t src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)) ; }
 703   void addptr(Address dst, Register src);
 704 
 705   void addptr(Register dst, Address src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)); }
 706   void addptr(Register dst, int32_t src);
 707   void addptr(Register dst, Register src);
 708   void addptr(Register dst, RegisterOrConstant src) {
 709     if (src.is_constant()) addptr(dst, (int) src.as_constant());


 735   void cmpptr(Address src1, AddressLiteral src2);
 736 
 737   void cmpptr(Register src1, AddressLiteral src2);
 738 
 739   void cmpptr(Register src1, Register src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
 740   void cmpptr(Register src1, Address src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
 741   // void cmpptr(Address src1, Register src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
 742 
 743   void cmpptr(Register src1, int32_t src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
 744   void cmpptr(Address src1, int32_t src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
 745 
  746   // cmp64 to avoid hiding cmpq
 747   void cmp64(Register src1, AddressLiteral src);
 748 
 749   void cmpxchgptr(Register reg, Address adr);
 750 
 751   void locked_cmpxchgptr(Register reg, AddressLiteral adr);
 752 
 753 
 754   void imulptr(Register dst, Register src) { LP64_ONLY(imulq(dst, src)) NOT_LP64(imull(dst, src)); }
 755   void imulptr(Register dst, Register src, int imm32) { LP64_ONLY(imulq(dst, src, imm32)) NOT_LP64(imull(dst, src, imm32)); }
 756 
 757 
 758   void negptr(Register dst) { LP64_ONLY(negq(dst)) NOT_LP64(negl(dst)); }
 759 
 760   void notptr(Register dst) { LP64_ONLY(notq(dst)) NOT_LP64(notl(dst)); }
 761 
 762   void shlptr(Register dst, int32_t shift);
 763   void shlptr(Register dst) { LP64_ONLY(shlq(dst)) NOT_LP64(shll(dst)); }
 764 
 765   void shrptr(Register dst, int32_t shift);
 766   void shrptr(Register dst) { LP64_ONLY(shrq(dst)) NOT_LP64(shrl(dst)); }
 767 
 768   void sarptr(Register dst) { LP64_ONLY(sarq(dst)) NOT_LP64(sarl(dst)); }
 769   void sarptr(Register dst, int32_t src) { LP64_ONLY(sarq(dst, src)) NOT_LP64(sarl(dst, src)); }
 770 
 771   void subptr(Address dst, int32_t src) { LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src)); }
 772 
 773   void subptr(Register dst, Address src) { LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src)); }
 774   void subptr(Register dst, int32_t src);
  775   // Force generation of a 4-byte immediate value even if it fits into 8 bits


 777   void subptr(Register dst, Register src);
 778   void subptr(Register dst, RegisterOrConstant src) {
 779     if (src.is_constant()) subptr(dst, (int) src.as_constant());
 780     else                   subptr(dst,       src.as_register());
 781   }
 782 
 783   void sbbptr(Address dst, int32_t src) { LP64_ONLY(sbbq(dst, src)) NOT_LP64(sbbl(dst, src)); }
 784   void sbbptr(Register dst, int32_t src) { LP64_ONLY(sbbq(dst, src)) NOT_LP64(sbbl(dst, src)); }
 785 
 786   void xchgptr(Register src1, Register src2) { LP64_ONLY(xchgq(src1, src2)) NOT_LP64(xchgl(src1, src2)) ; }
 787   void xchgptr(Register src1, Address src2) { LP64_ONLY(xchgq(src1, src2)) NOT_LP64(xchgl(src1, src2)) ; }
 788 
 789   void xaddptr(Address src1, Register src2) { LP64_ONLY(xaddq(src1, src2)) NOT_LP64(xaddl(src1, src2)) ; }
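All of these *ptr wrappers use the same dispatch idiom: LP64_ONLY()/NOT_LP64() come from utilities/macros.hpp and expand to their argument on exactly one of the two word sizes, so each wrapper compiles down to a single q-form or l-form instruction. A minimal restatement of the pattern (the real definitions live in utilities/macros.hpp):

    #ifdef _LP64
    #define LP64_ONLY(code) code
    #define NOT_LP64(code)
    #else
    #define LP64_ONLY(code)
    #define NOT_LP64(code) code
    #endif

    // So on a 64-bit build:   void addptr(Address dst, int32_t src) { addq(dst, src); }
    // and on a 32-bit build:  void addptr(Address dst, int32_t src) { addl(dst, src); }
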
 790 
 791 
 792 
 793   // Helper functions for statistics gathering.
 794   // Conditionally (atomically, on MPs) increments passed counter address, preserving condition codes.
 795   void cond_inc32(Condition cond, AddressLiteral counter_addr);
 796   // Unconditional atomic increment.
 797   void atomic_incl(Address counter_addr);
 798   void atomic_incl(AddressLiteral counter_addr, Register scr = rscratch1);
 799 #ifdef _LP64
 800   void atomic_incq(Address counter_addr);
 801   void atomic_incq(AddressLiteral counter_addr, Register scr = rscratch1);
 802 #endif
 803   void atomic_incptr(AddressLiteral counter_addr, Register scr = rscratch1) { LP64_ONLY(atomic_incq(counter_addr, scr)) NOT_LP64(atomic_incl(counter_addr, scr)) ; }
 804   void atomic_incptr(Address counter_addr) { LP64_ONLY(atomic_incq(counter_addr)) NOT_LP64(atomic_incl(counter_addr)) ; }
 805 
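The new atomic_incl/atomic_incq/atomic_incptr overloads exist mainly so the locking paths can bump shared RTM statistics (RTMLockingCounters) with one locked increment; the optional scratch register presumably lets 64-bit code materialize a far counter address first. Semantically this is just a relaxed atomic add on a word in memory, roughly (illustrative only):

    #include <atomic>
    #include <cstdint>

    static std::atomic<uint64_t> rtm_total_count{0};

    inline void bump_total() {
      // Comparable effect to what atomic_incptr emits for a counter address:
      // a single lock-prefixed increment of the counter word.
      rtm_total_count.fetch_add(1, std::memory_order_relaxed);
    }
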
 806   void lea(Register dst, AddressLiteral adr);
 807   void lea(Address dst, AddressLiteral adr);
 808   void lea(Register dst, Address adr) { Assembler::lea(dst, adr); }
 809 
 810   void leal32(Register dst, Address src) { leal(dst, src); }
 811 
 812   // Import other testl() methods from the parent class or else
 813   // they will be hidden by the following overriding declaration.
 814   using Assembler::testl;
 815   void testl(Register dst, AddressLiteral src);
 816 
 817   void orptr(Register dst, Address src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
 818   void orptr(Register dst, Register src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
 819   void orptr(Register dst, int32_t src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
 820   void orptr(Address dst, int32_t imm32) { LP64_ONLY(orq(dst, imm32)) NOT_LP64(orl(dst, imm32)); }
 821 
 822   void testptr(Register src, int32_t imm32) {  LP64_ONLY(testq(src, imm32)) NOT_LP64(testl(src, imm32)); }
 823   void testptr(Register src1, Register src2);
 824 


1096   void cmov32( Condition cc, Register dst, Address  src);
1097   void cmov32( Condition cc, Register dst, Register src);
1098 
1099   void cmov(   Condition cc, Register dst, Register src) { cmovptr(cc, dst, src); }
1100 
1101   void cmovptr(Condition cc, Register dst, Address  src) { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmov32(cc, dst, src)); }
1102   void cmovptr(Condition cc, Register dst, Register src) { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmov32(cc, dst, src)); }
1103 
1104   void movoop(Register dst, jobject obj);
1105   void movoop(Address dst, jobject obj);
1106 
1107   void mov_metadata(Register dst, Metadata* obj);
1108   void mov_metadata(Address dst, Metadata* obj);
1109 
1110   void movptr(ArrayAddress dst, Register src);
1111   // can this do an lea?
1112   void movptr(Register dst, ArrayAddress src);
1113 
1114   void movptr(Register dst, Address src);
1115 
1116 #ifdef _LP64
1117   void movptr(Register dst, AddressLiteral src, Register scratch=rscratch1);
1118 #else
1119   void movptr(Register dst, AddressLiteral src, Register scratch=noreg); // Scratch reg is ignored in 32-bit
1120 #endif
1121 
1122   void movptr(Register dst, intptr_t src);
1123   void movptr(Register dst, Register src);
1124   void movptr(Address dst, intptr_t src);
1125 
1126   void movptr(Address dst, Register src);
1127 
1128   void movptr(Register dst, RegisterOrConstant src) {
1129     if (src.is_constant()) movptr(dst, src.as_constant());
1130     else                   movptr(dst, src.as_register());
1131   }
1132 
1133 #ifdef _LP64
1134   // Generally the next two are only used for moving NULL
1135   // Although there are situations in initializing the mark word where
1136   // they could be used. They are dangerous.
1137 
1138   // They only exist on LP64 so that int32_t and intptr_t are not the same
1139   // and we have ambiguous declarations.
1140 

