 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_X86_VM_MACROASSEMBLER_X86_HPP
#define CPU_X86_VM_MACROASSEMBLER_X86_HPP

#include "asm/assembler.hpp"
#include "utilities/macros.hpp"
#include "runtime/rtmLocking.hpp"

// MacroAssembler extends Assembler by frequently used macros.
//
// Instructions for which a 'better' code sequence exists depending
// on arguments should also go in here.

class MacroAssembler: public Assembler {
  friend class LIR_Assembler;
  friend class Runtime1; // as_Address()

 protected:

  Address as_Address(AddressLiteral adr);
  Address as_Address(ArrayAddress adr);

  // Support for VM calls
  //
  // This is the base routine called by the different versions of call_VM_leaf. The interpreter
  // may customize this version by overriding it for its purposes (e.g., to save/restore
  // additional registers when doing a VM call).

  MacroAssembler(CodeBuffer* code) : Assembler(code) {}

  // Support for NULL-checks
  //
  // Generates code that causes a NULL OS exception if the content of reg is NULL.
  // If the accessed location is M[reg + offset] and the offset is known, provide the
  // offset. No explicit code generation is needed if the offset is within a certain
  // range (0 <= offset <= page_size).

  void null_check(Register reg, int offset = -1);
  static bool needs_explicit_null_check(intptr_t offset);
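
  // Illustrative use (a hypothetical call site, not part of this interface),
  // using the usual "__ masm->" shorthand:
  //
  //   __ null_check(rbx, oopDesc::klass_offset_in_bytes());
  //   __ movptr(rax, Address(rbx, oopDesc::klass_offset_in_bytes()));
  //
  // When the offset is small (within the first, protected page), null_check
  // emits no code at all: the subsequent access itself traps if rbx is NULL.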

  // Required platform-specific helpers for Label::patch_instructions.
  // They _shadow_ the declarations in AbstractAssembler, which are undefined.
  void pd_patch_instruction(address branch, address target) {
    unsigned char op = branch[0];
    assert(op == 0xE8 /* call */ ||
           op == 0xE9 /* jmp */ ||
           op == 0xEB /* short jmp */ ||
           (op & 0xF0) == 0x70 /* short jcc */ ||
           (op == 0x0F && (branch[1] & 0xF0) == 0x80) /* jcc */ ||
           (op == 0xC7 && branch[1] == 0xF8) /* xbegin */,
           "Invalid opcode at patch point");

    if (op == 0xEB || (op & 0xF0) == 0x70) {
      // short offset operators (jmp and jcc)
      char* disp = (char*) &branch[1];
      int imm8 = target - (address) &disp[1];
      guarantee(this->is8bit(imm8), "Short forward jump exceeds 8-bit offset");
      *disp = imm8;
    } else {
      int* disp = (int*) &branch[(op == 0x0F || op == 0xC7) ? 2 : 1];
      int imm32 = target - (address) &disp[1];
      *disp = imm32;
    }
  }
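
  // For reference (informal encoding notes, standard x86): the displacement
  // field patched above sits at these positions:
  //
  //   E8 cd      call rel32     disp at branch+1
  //   E9 cd      jmp  rel32     disp at branch+1
  //   EB cb      jmp  rel8      disp at branch+1 (8-bit)
  //   7x cb      jcc  rel8      disp at branch+1 (8-bit)
  //   0F 8x cd   jcc  rel32     disp at branch+2
  //   C7 F8 cd   xbegin rel32   disp at branch+2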

  // The following 4 methods return the offset of the appropriate move instruction

  // Support for fast byte/short loading with zero extension (depending on particular CPU)
  int load_unsigned_byte(Register dst, Address src);
  int load_unsigned_short(Register dst, Address src);

  // Support for fast byte/short loading with sign extension (depending on particular CPU)
  int load_signed_byte(Register dst, Address src);
  int load_signed_short(Register dst, Address src);

  // Support for sign-extension (hi:lo = extend_sign(lo))
  void extend_sign(Register hi, Register lo);

  // Load and store values by size and signed-ness
  void load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2 = noreg);
  void store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2 = noreg);

  // Support for inc/dec with optimal instruction selection depending on value

  void increment(Register reg, int value = 1) { LP64_ONLY(incrementq(reg, value)) NOT_LP64(incrementl(reg, value)) ; }
  void decrement(Register reg, int value = 1) { LP64_ONLY(decrementq(reg, value)) NOT_LP64(decrementl(reg, value)) ; }

  void decrementl(Address dst, int value = 1);
  void decrementl(Register reg, int value = 1);

  void decrementq(Register reg, int value = 1);
  void decrementq(Address dst, int value = 1);

  void incrementl(Address dst, int value = 1);
  void incrementl(Register reg, int value = 1);

  void incrementq(Register reg, int value = 1);
  void incrementq(Address dst, int value = 1);
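
  // A plausible shape for the value-dependent selection above (an illustrative
  // sketch only; the real definitions live in macroAssembler_x86.cpp):
  //
  //   void MacroAssembler::incrementl(Register reg, int value) {
  //     if (value == 0) return;                         // nothing to emit
  //     if (value == 1 && UseIncDec) { incl(reg); }     // shortest encoding
  //     else if (value < 0) decrementl(reg, -value);
  //     else addl(reg, value);                          // general case
  //   }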

  // Support optimal SSE move instructions.
  void movflt(XMMRegister dst, XMMRegister src) {
    if (UseXmmRegToRegMoveAll) { movaps(dst, src); return; }
    else                       { movss (dst, src); return; }
  }
  void movflt(XMMRegister dst, Address src) { movss(dst, src); }
  void movflt(XMMRegister dst, AddressLiteral src);
  void movflt(Address dst, XMMRegister src) { movss(dst, src); }

  void movdbl(XMMRegister dst, XMMRegister src) {
    if (UseXmmRegToRegMoveAll) { movapd(dst, src); return; }
    else                       { movsd (dst, src); return; }
  }

  void movdbl(XMMRegister dst, AddressLiteral src);

  void movdbl(XMMRegister dst, Address src) {
    if (UseXmmLoadAndClearUpper) { movsd (dst, src); return; }
    else                         { movlpd(dst, src); return; }
  }
  void movdbl(Address dst, XMMRegister src) { movsd(dst, src); }
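
  // Why the flags matter (informal note): the reg-reg forms of movss/movsd
  // merge into the destination and so carry a false dependency on its upper
  // bits, which movaps/movapd avoid by copying the whole register. Likewise,
  // a movsd load clears the upper half while movlpd merges into it. The Use*
  // flags select whichever form is cheaper on the detected CPU.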

  void incrementl(AddressLiteral dst);
  void incrementl(ArrayAddress dst);

  void incrementq(AddressLiteral dst);

  // Alignment
  void align(int modulus);

  // A 5 byte nop that is safe for patching (see patch_verified_entry)
  void fat_nop();

  // Stack frame creation/removal
  void enter();
  void leave();

  // Support for getting the JavaThread pointer (i.e., a reference to thread-local information)
  // The pointer will be loaded into the thread register.
  void get_thread(Register thread);

  // Support for VM calls
  //
  // It is imperative that all calls into the VM are handled via the call_VM macros.
  // They make sure that the stack linkage is set up correctly. call_VM's correspond
  // to ENTRY/ENTRY_X entry points while call_VM_leaf's correspond to LEAF entry points.
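
  // Illustrative call site (hypothetical, not part of this header; the entry
  // point name is made up for the example):
  //
  //   __ call_VM(rax,                                           // oop result
  //              CAST_FROM_FN_PTR(address, SomeRuntime::entry),  // hypothetical
  //              rbx);                                           // argument
  //
  // call_VM records the last Java frame, performs the call, and checks for a
  // pending exception on return, which a raw call() would silently skip.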

  // Biased locking support
  // lock_reg and obj_reg must be loaded up with the appropriate values.
  // swap_reg must be rax, and is killed.
  // tmp_reg is optional. If it is supplied (i.e., != noreg) it will
  // be killed; if not supplied, push/pop will be used internally to
  // allocate a temporary (inefficient, avoid if possible).
  // Optional slow case is for implementations (interpreter and C1) which branch to
  // slow case directly. Leaves condition codes set for C2's Fast_Lock node.
  // Returns offset of first potentially-faulting instruction for null
  // check info (currently consumed only by C1). If
  // swap_reg_contains_mark is true then returns -1 as it is assumed
  // the calling code has already passed any potential faults.
  int biased_locking_enter(Register lock_reg, Register obj_reg,
                           Register swap_reg, Register tmp_reg,
                           bool swap_reg_contains_mark,
                           Label& done, Label* slow_case = NULL,
                           BiasedLockingCounters* counters = NULL);
  void biased_locking_exit (Register obj_reg, Register temp_reg, Label& done);
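
  // Typical shape of a caller (an informal sketch): try the biased path,
  // fall back to a conventional locking path on failure, bind 'done' on
  // success:
  //
  //   Label done, slow;
  //   __ biased_locking_enter(lock_reg, obj_reg, swap_reg, tmp_reg,
  //                           false, done, &slow, NULL);
  //   __ bind(slow);
  //   // ... CAS-based stack-lock / inflated path ...
  //   __ bind(done);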
#ifdef COMPILER2
  // Code used by cmpFastLock and cmpFastUnlock mach instructions in .ad file.
  // See full description in macroAssembler_x86.cpp.
  void fast_lock(Register obj, Register box, Register tmp,
                 Register scr, Register cx1, Register cx2,
                 BiasedLockingCounters* counters,
                 RTMLockingCounters* rtmcounters,
                 RTMLockingCounters* stackrtmcounters,
                 Metadata* method_data,
                 bool use_rtm, bool profile_rtm);
  void fast_unlock(Register obj, Register box, Register tmp, bool use_rtm);
#if INCLUDE_RTM_OPT
  void rtmcounters_update(Register tmp, Register scr);
  void branch_on_random_using_rdtsc(Register tmp, Register scr, int count, Label& brLabel);
  void rtm_abortratio_calculation(Register box, Register tmp, Register scr,
                                  RTMLockingCounters* rtmcounters, Metadata* method_data, bool isStackLock);
  void rtm_retry_lockabort(Register count, Register box, Register tmp, Label& retryLabel, bool isStackLock);
  void rtm_retry_lockbusy(Register count, Register box, Register tmp, Register scr, Label& retryLabel);
#endif
#endif
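
  // Rough idea behind the RTM helpers above (an informal sketch; assumes RTM
  // hardware and glosses over the real heuristics):
  //
  //   retry: xbegin(abort_handler);     // start a transaction
  //          ... run the critical section speculatively, no lock write ...
  //          xend();                    // commit
  //   abort_handler:
  //          update abort counters; if the abort looks transient and the
  //          retry budget (count) is not exhausted, loop back to retry,
  //          otherwise fall back to the conventional locking path.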

  Condition negate_condition(Condition cond);

  // Instructions that use AddressLiteral operands. These instructions can handle 32bit/64bit
  // operands. In general the names are modified to avoid hiding the instruction in Assembler
  // so that we don't need to implement all the varieties in the Assembler with trivial wrappers
  // here in MacroAssembler. The major exception to this rule is call.

  // Arithmetics
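
  // How the ptr-sized wrappers below work (informal note): LP64_ONLY(x)
  // expands to x on 64-bit builds and to nothing on 32-bit builds, and
  // NOT_LP64(x) is the reverse. So, for example,
  //
  //   addptr(rdx, 8);
  //
  // emits addq(rdx, 8) under _LP64 and addl(rdx, 8) otherwise. Code handling
  // pointer-sized values should prefer these wrappers to the raw l/q forms.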

  void addptr(Address dst, int32_t src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)) ; }
  void addptr(Address dst, Register src);

  void addptr(Register dst, Address src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)); }
  void addptr(Register dst, int32_t src);
  void addptr(Register dst, Register src);
  void addptr(Register dst, RegisterOrConstant src) {
    if (src.is_constant()) addptr(dst, (int) src.as_constant());
    else addptr(dst, src.as_register());
  }

  void cmpptr(Address src1, AddressLiteral src2);

  void cmpptr(Register src1, AddressLiteral src2);

  void cmpptr(Register src1, Register src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
  void cmpptr(Register src1, Address src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
  // void cmpptr(Address src1, Register src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }

  void cmpptr(Register src1, int32_t src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
  void cmpptr(Address src1, int32_t src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }

  // cmp64 to avoid hiding cmpq
  void cmp64(Register src1, AddressLiteral src);

  void cmpxchgptr(Register reg, Address adr);

  void locked_cmpxchgptr(Register reg, AddressLiteral adr);

  void imulptr(Register dst, Register src) { LP64_ONLY(imulq(dst, src)) NOT_LP64(imull(dst, src)); }
  void imulptr(Register dst, Register src, int imm32) { LP64_ONLY(imulq(dst, src, imm32)) NOT_LP64(imull(dst, src, imm32)); }

  void negptr(Register dst) { LP64_ONLY(negq(dst)) NOT_LP64(negl(dst)); }

  void notptr(Register dst) { LP64_ONLY(notq(dst)) NOT_LP64(notl(dst)); }

  void shlptr(Register dst, int32_t shift);
  void shlptr(Register dst) { LP64_ONLY(shlq(dst)) NOT_LP64(shll(dst)); }

  void shrptr(Register dst, int32_t shift);
  void shrptr(Register dst) { LP64_ONLY(shrq(dst)) NOT_LP64(shrl(dst)); }

  void sarptr(Register dst) { LP64_ONLY(sarq(dst)) NOT_LP64(sarl(dst)); }
  void sarptr(Register dst, int32_t src) { LP64_ONLY(sarq(dst, src)) NOT_LP64(sarl(dst, src)); }

  void subptr(Address dst, int32_t src) { LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src)); }

  void subptr(Register dst, Address src) { LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src)); }
  void subptr(Register dst, int32_t src);
  // Force generation of a 4-byte immediate value even if it fits into 8 bits
  void subptr_imm32(Register dst, int32_t src);
  void subptr(Register dst, Register src);
  void subptr(Register dst, RegisterOrConstant src) {
    if (src.is_constant()) subptr(dst, (int) src.as_constant());
    else subptr(dst, src.as_register());
  }

  void sbbptr(Address dst, int32_t src) { LP64_ONLY(sbbq(dst, src)) NOT_LP64(sbbl(dst, src)); }
  void sbbptr(Register dst, int32_t src) { LP64_ONLY(sbbq(dst, src)) NOT_LP64(sbbl(dst, src)); }

  void xchgptr(Register src1, Register src2) { LP64_ONLY(xchgq(src1, src2)) NOT_LP64(xchgl(src1, src2)) ; }
  void xchgptr(Register src1, Address src2) { LP64_ONLY(xchgq(src1, src2)) NOT_LP64(xchgl(src1, src2)) ; }

  void xaddptr(Address src1, Register src2) { LP64_ONLY(xaddq(src1, src2)) NOT_LP64(xaddl(src1, src2)) ; }

  // Helper functions for statistics gathering.
  // Conditionally (atomically, on MPs) increments the passed counter address, preserving condition codes.
  void cond_inc32(Condition cond, AddressLiteral counter_addr);
  // Unconditional atomic increment.
  void atomic_incl(Address counter_addr);
  void atomic_incl(AddressLiteral counter_addr, Register scr = rscratch1);
#ifdef _LP64
  void atomic_incq(Address counter_addr);
  void atomic_incq(AddressLiteral counter_addr, Register scr = rscratch1);
#endif
  void atomic_incptr(AddressLiteral counter_addr, Register scr = rscratch1) { LP64_ONLY(atomic_incq(counter_addr, scr)) NOT_LP64(atomic_incl(counter_addr, scr)) ; }
  void atomic_incptr(Address counter_addr) { LP64_ONLY(atomic_incq(counter_addr)) NOT_LP64(atomic_incl(counter_addr)) ; }
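
  // Illustrative counter bump (hypothetical counter symbol, for example only):
  //
  //   __ atomic_incl(ExternalAddress((address) &some_global_counter));
  //
  // On multiprocessor systems the increment carries a lock prefix, so counts
  // from concurrently executing generated code are not lost.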

  void lea(Register dst, AddressLiteral adr);
  void lea(Address dst, AddressLiteral adr);
  void lea(Register dst, Address adr) { Assembler::lea(dst, adr); }

  void leal32(Register dst, Address src) { leal(dst, src); }

  // Import other testl() methods from the parent class or else
  // they will be hidden by the following overriding declaration.
  using Assembler::testl;
  void testl(Register dst, AddressLiteral src);
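
  // Background (a general C++ name-lookup rule, not specific to this file):
  // declaring any testl overload here hides every inherited Assembler::testl
  // overload, so without the using-declaration a call such as
  //
  //   masm->testl(rax, rbx);
  //
  // would fail to compile. The using-declaration re-exposes the base-class
  // overload set alongside the AddressLiteral variant.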

  void orptr(Register dst, Address src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
  void orptr(Register dst, Register src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
  void orptr(Register dst, int32_t src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
  void orptr(Address dst, int32_t imm32) { LP64_ONLY(orq(dst, imm32)) NOT_LP64(orl(dst, imm32)); }

  void testptr(Register src, int32_t imm32) { LP64_ONLY(testq(src, imm32)) NOT_LP64(testl(src, imm32)); }
  void testptr(Register src1, Register src2);

  void cmov32( Condition cc, Register dst, Address src);
  void cmov32( Condition cc, Register dst, Register src);

  void cmov(   Condition cc, Register dst, Register src) { cmovptr(cc, dst, src); }

  void cmovptr(Condition cc, Register dst, Address src) { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmov32(cc, dst, src)); }
  void cmovptr(Condition cc, Register dst, Register src) { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmov32(cc, dst, src)); }

  void movoop(Register dst, jobject obj);
  void movoop(Address dst, jobject obj);

  void mov_metadata(Register dst, Metadata* obj);
  void mov_metadata(Address dst, Metadata* obj);

  void movptr(ArrayAddress dst, Register src);
  // can this do an lea?
  void movptr(Register dst, ArrayAddress src);

  void movptr(Register dst, Address src);

#ifdef _LP64
  void movptr(Register dst, AddressLiteral src, Register scratch = rscratch1);
#else
  void movptr(Register dst, AddressLiteral src, Register scratch = noreg); // Scratch reg is ignored in 32-bit
#endif

  void movptr(Register dst, intptr_t src);
  void movptr(Register dst, Register src);
  void movptr(Address dst, intptr_t src);

  void movptr(Address dst, Register src);

  void movptr(Register dst, RegisterOrConstant src) {
    if (src.is_constant()) movptr(dst, src.as_constant());
    else movptr(dst, src.as_register());
  }
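
  // Note on the 64-bit scratch register (informal): an AddressLiteral can lie
  // outside the +/- 2GB RIP-relative range, in which case the literal address
  // is first materialized in 'scratch' and the load goes through it, e.g.
  //
  //   __ movptr(rax, AddressLiteral(counter_addr, relocInfo::none));
  //
  // where counter_addr is some hypothetical far address; rscratch1 may be
  // clobbered in the process.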

#ifdef _LP64
  // Generally the next two are only used for moving NULL, although there are
  // situations in initializing the mark word where they could be used.
  // They are dangerous.

  // They only exist on LP64 because there int32_t and intptr_t are distinct
  // types; on 32-bit the declarations would otherwise be ambiguous.