11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #ifndef CPU_AARCH64_MACROASSEMBLER_AARCH64_HPP
27 #define CPU_AARCH64_MACROASSEMBLER_AARCH64_HPP
28
29 #include "asm/assembler.hpp"
30 #include "oops/compressedOops.hpp"
31
32 class ciValueKlass;
33
34 // MacroAssembler extends Assembler by frequently used macros.
35 //
36 // Instructions for which a 'better' code sequence exists depending
37 // on arguments should also go in here.
38
39 class MacroAssembler: public Assembler {
40 friend class LIR_Assembler;
41
42 public:
43 using Assembler::mov;
44 using Assembler::movi;
45
46 protected:
47
48 // Support for VM calls
49 //
50 // This is the base routine called by the different versions of call_VM_leaf. The interpreter
1150 }
1151
1152 WRAP(adds) WRAP(addsw) WRAP(subs) WRAP(subsw)
1153
1154 void add(Register Rd, Register Rn, RegisterOrConstant increment);
1155 void addw(Register Rd, Register Rn, RegisterOrConstant increment);
1156 void sub(Register Rd, Register Rn, RegisterOrConstant decrement);
1157 void subw(Register Rd, Register Rn, RegisterOrConstant decrement);
1158
1159 void adrp(Register reg1, const Address &dest, unsigned long &byte_offset);
1160
1161
// State of a register during value-type argument shuffling
// (presumably consumed by unpack_value_args and friends below —
// the helpers taking a RegState[] are declared elsewhere in this class).
enum RegState {
  reg_readonly,  // register still holds a live input value; must not be clobbered
  reg_writable,  // register is free to be overwritten
  reg_written    // register already contains its final value
};
1167
1168 void verified_entry(Compile* C, int sp_inc);
1169
1170 // Unpack all value type arguments passed as oops
1171 void unpack_value_args(Compile* C, bool receiver_only);
1172 void store_value_type_fields_to_buf(ciValueKlass* vk);
1173
// Emit a bounds-checked computed jump into a table of code stubs that
// starts at 'jumptable'. The zero-based index (index - lowbound) is
// compared unsigned against the table size; out-of-range values branch
// to 'jumptable_end'. Each table entry is 'stride' instructions, and
// stride * instruction_size must be a power of two (exact_log2).
// Clobbers rscratch1, rscratch2 and the condition flags.
void tableswitch(Register index, jint lowbound, jint highbound,
                 Label &jumptable, Label &jumptable_end, int stride = 1) {
  adr(rscratch1, jumptable);                   // rscratch1 = table base address
  subsw(rscratch2, index, lowbound);           // rscratch2 = zero-based index
  subsw(zr, rscratch2, highbound - lowbound);  // compare against table size
  br(Assembler::HS, jumptable_end);            // unsigned >= size: out of range
  add(rscratch1, rscratch1, rscratch2,
      ext::sxtw, exact_log2(stride * Assembler::instruction_size));
  br(rscratch1);                               // indirect jump into the table
}
1184
1185 // Form an address from base + offset in Rd. Rd may or may not
1186 // actually be used: you must use the Address that is returned. It
1187 // is up to you to ensure that the shift provided matches the size
1188 // of your data.
1189 Address form_address(Register Rd, Register base, long byte_offset, int shift);
1190
1191 // Return true iff an address is within the 48-bit AArch64 address
1192 // space.
1375 } else {
1376 ldrw(Rx, spill_address(4, offset));
1377 }
1378 }
// Reload a SIMD/FP register from its spill slot at sp-relative 'offset'.
// The slot size in bytes is 1 << T, i.e. the register variant T encodes
// the log2 of the transfer size passed to spill_address.
void unspill(FloatRegister Vx, SIMD_RegVariant T, int offset) {
  ldr(Vx, T, spill_address(1 << (int)T, offset));
}
1382 void spill_copy128(int src_offset, int dst_offset,
1383 Register tmp1=rscratch1, Register tmp2=rscratch2) {
1384 if (src_offset < 512 && (src_offset & 7) == 0 &&
1385 dst_offset < 512 && (dst_offset & 7) == 0) {
1386 ldp(tmp1, tmp2, Address(sp, src_offset));
1387 stp(tmp1, tmp2, Address(sp, dst_offset));
1388 } else {
1389 unspill(tmp1, true, src_offset);
1390 spill(tmp1, true, dst_offset);
1391 unspill(tmp1, true, src_offset+8);
1392 spill(tmp1, true, dst_offset+8);
1393 }
1394 }
1395 };
1396
#ifdef ASSERT
// Debug-build platform hook: aarch64 always answers false, i.e. the
// shared assembler code does not verify instruction marks on this port.
inline bool AbstractAssembler::pd_check_instruction_mark() { return false; }
#endif
1400
1401 /**
1402 * class SkipIfEqual:
1403 *
1404 * Instantiating this class will result in assembly code being output that will
1405 * jump around any code emitted between the creation of the instance and its
1406 * automatic destruction at the end of a scope block, depending on the value of
1407 * the flag passed to the constructor, which will be checked at run-time.
1408 */
1409 class SkipIfEqual {
1410 private:
1411 MacroAssembler* _masm;
1412 Label _label;
1413
1414 public:
|
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #ifndef CPU_AARCH64_MACROASSEMBLER_AARCH64_HPP
27 #define CPU_AARCH64_MACROASSEMBLER_AARCH64_HPP
28
29 #include "asm/assembler.hpp"
30 #include "oops/compressedOops.hpp"
31 #include "utilities/macros.hpp"
32 #include "runtime/signature.hpp"
33
34
35 class ciValueKlass;
36
37 // MacroAssembler extends Assembler by frequently used macros.
38 //
39 // Instructions for which a 'better' code sequence exists depending
40 // on arguments should also go in here.
41
42 class MacroAssembler: public Assembler {
43 friend class LIR_Assembler;
44
45 public:
46 using Assembler::mov;
47 using Assembler::movi;
48
49 protected:
50
51 // Support for VM calls
52 //
53 // This is the base routine called by the different versions of call_VM_leaf. The interpreter
1153 }
1154
1155 WRAP(adds) WRAP(addsw) WRAP(subs) WRAP(subsw)
1156
1157 void add(Register Rd, Register Rn, RegisterOrConstant increment);
1158 void addw(Register Rd, Register Rn, RegisterOrConstant increment);
1159 void sub(Register Rd, Register Rn, RegisterOrConstant decrement);
1160 void subw(Register Rd, Register Rn, RegisterOrConstant decrement);
1161
1162 void adrp(Register reg1, const Address &dest, unsigned long &byte_offset);
1163
1164
// State of a register during value-type argument shuffling; the helpers
// below (move_helper, pack/unpack_value_helper, shuffle_value_args)
// thread a RegState[] through to track which registers may be clobbered.
enum RegState {
  reg_readonly,  // register still holds a live input value; must not be clobbered
  reg_writable,  // register is free to be overwritten
  reg_written    // register already contains its final value
};
1170
1171 void verified_entry(Compile* C, int sp_inc);
1172
1173 int store_value_type_fields_to_buf(ciValueKlass* vk, bool from_interpreter = true);
1174
1175 // Unpack all value type arguments passed as oops
1176 void unpack_value_args(Compile* C, bool receiver_only);
1177 bool move_helper(VMReg from, VMReg to, BasicType bt, RegState reg_state[], int ret_off, int extra_stack_offset);
1178 bool unpack_value_helper(const GrowableArray<SigEntry>* sig, int& sig_index, VMReg from, VMRegPair* regs_to, int& to_index,
1179 RegState reg_state[], int ret_off, int extra_stack_offset);
1180 bool pack_value_helper(const GrowableArray<SigEntry>* sig, int& sig_index, int vtarg_index,
1181 VMReg to, VMRegPair* regs_from, int regs_from_count, int& from_index, RegState reg_state[],
1182 int ret_off, int extra_stack_offset);
1183 void restore_stack(Compile* C);
1184
1185 int shuffle_value_args(bool is_packing, bool receiver_only, int extra_stack_offset,
1186 BasicType* sig_bt, const GrowableArray<SigEntry>* sig_cc,
1187 int args_passed, int args_on_stack, VMRegPair* regs,
1188 int args_passed_to, int args_on_stack_to, VMRegPair* regs_to);
1189 bool shuffle_value_args_spill(bool is_packing, const GrowableArray<SigEntry>* sig_cc, int sig_cc_index,
1190 VMRegPair* regs_from, int from_index, int regs_from_count,
1191 RegState* reg_state, int sp_inc, int extra_stack_offset);
1192 VMReg spill_reg_for(VMReg reg);
1193
1194
// Emit a bounds-checked computed jump into a table of code stubs that
// starts at 'jumptable'. The zero-based index (index - lowbound) is
// compared unsigned against the table size; out-of-range values branch
// to 'jumptable_end'. Each table entry is 'stride' instructions, and
// stride * instruction_size must be a power of two (exact_log2).
// Clobbers rscratch1, rscratch2 and the condition flags.
void tableswitch(Register index, jint lowbound, jint highbound,
                 Label &jumptable, Label &jumptable_end, int stride = 1) {
  adr(rscratch1, jumptable);                   // rscratch1 = table base address
  subsw(rscratch2, index, lowbound);           // rscratch2 = zero-based index
  subsw(zr, rscratch2, highbound - lowbound);  // compare against table size
  br(Assembler::HS, jumptable_end);            // unsigned >= size: out of range
  add(rscratch1, rscratch1, rscratch2,
      ext::sxtw, exact_log2(stride * Assembler::instruction_size));
  br(rscratch1);                               // indirect jump into the table
}
1205
1206 // Form an address from base + offset in Rd. Rd may or may not
1207 // actually be used: you must use the Address that is returned. It
1208 // is up to you to ensure that the shift provided matches the size
1209 // of your data.
1210 Address form_address(Register Rd, Register base, long byte_offset, int shift);
1211
1212 // Return true iff an address is within the 48-bit AArch64 address
1213 // space.
1396 } else {
1397 ldrw(Rx, spill_address(4, offset));
1398 }
1399 }
// Reload a SIMD/FP register from its spill slot at sp-relative 'offset'.
// The slot size in bytes is 1 << T, i.e. the register variant T encodes
// the log2 of the transfer size passed to spill_address.
void unspill(FloatRegister Vx, SIMD_RegVariant T, int offset) {
  ldr(Vx, T, spill_address(1 << (int)T, offset));
}
1403 void spill_copy128(int src_offset, int dst_offset,
1404 Register tmp1=rscratch1, Register tmp2=rscratch2) {
1405 if (src_offset < 512 && (src_offset & 7) == 0 &&
1406 dst_offset < 512 && (dst_offset & 7) == 0) {
1407 ldp(tmp1, tmp2, Address(sp, src_offset));
1408 stp(tmp1, tmp2, Address(sp, dst_offset));
1409 } else {
1410 unspill(tmp1, true, src_offset);
1411 spill(tmp1, true, dst_offset);
1412 unspill(tmp1, true, src_offset+8);
1413 spill(tmp1, true, dst_offset+8);
1414 }
1415 }
1416
1417 #include "asm/macroAssembler_common.hpp"
1418
1419 };
1420
#ifdef ASSERT
// Debug-build platform hook: aarch64 always answers false, i.e. the
// shared assembler code does not verify instruction marks on this port.
inline bool AbstractAssembler::pd_check_instruction_mark() { return false; }
#endif
1424
1425 /**
1426 * class SkipIfEqual:
1427 *
1428 * Instantiating this class will result in assembly code being output that will
1429 * jump around any code emitted between the creation of the instance and its
1430 * automatic destruction at the end of a scope block, depending on the value of
1431 * the flag passed to the constructor, which will be checked at run-time.
1432 */
1433 class SkipIfEqual {
1434 private:
1435 MacroAssembler* _masm;
1436 Label _label;
1437
1438 public:
|