182
183 u = val << (31 - hi);
184 n = n >> (31 - hi + lo);
185 return n;
186 }
187
188 static inline uint32_t extract(uint32_t val, int msb, int lsb) {
189 int nbits = msb - lsb + 1;
190 assert_cond(msb >= lsb);
191 uint32_t mask = (1U << nbits) - 1;
192 uint32_t result = val >> lsb;
193 result &= mask;
194 return result;
195 }
196
197 static inline int32_t sextract(uint32_t val, int msb, int lsb) {
198 uint32_t uval = extract(val, msb, lsb);
199 return extend(uval, msb - lsb);
200 }
201
202 static void patch(address a, int msb, int lsb, unsigned long val) {
203 int nbits = msb - lsb + 1;
204 guarantee(val < (1U << nbits), "Field too big for insn");
205 assert_cond(msb >= lsb);
206 unsigned mask = (1U << nbits) - 1;
207 val <<= lsb;
208 mask <<= lsb;
209 unsigned target = *(unsigned *)a;
210 target &= ~mask;
211 target |= val;
212 *(unsigned *)a = target;
213 }
214
215 static void spatch(address a, int msb, int lsb, long val) {
216 int nbits = msb - lsb + 1;
217 long chk = val >> (nbits - 1);
218 guarantee (chk == -1 || chk == 0, "Field too big for insn");
219 unsigned uval = val;
220 unsigned mask = (1U << nbits) - 1;
221 uval &= mask;
222 uval <<= lsb;
223 mask <<= lsb;
224 unsigned target = *(unsigned *)a;
225 target &= ~mask;
226 target |= uval;
227 *(unsigned *)a = target;
228 }
229
230 void f(unsigned val, int msb, int lsb) {
231 int nbits = msb - lsb + 1;
232 guarantee(val < (1U << nbits), "Field too big for insn");
233 assert_cond(msb >= lsb);
234 unsigned mask = (1U << nbits) - 1;
235 val <<= lsb;
236 mask <<= lsb;
237 insn |= val;
238 assert_cond((bits & mask) == 0);
239 #ifdef ASSERT
240 bits |= mask;
241 #endif
242 }
243
// Set the single bit `bit` of the instruction to val.
void f(unsigned val, int bit) {
  f(val, bit, bit);
}
247
248 void sf(long val, int msb, int lsb) {
249 int nbits = msb - lsb + 1;
250 long chk = val >> (nbits - 1);
251 guarantee (chk == -1 || chk == 0, "Field too big for insn");
252 unsigned uval = val;
253 unsigned mask = (1U << nbits) - 1;
254 uval &= mask;
255 f(uval, lsb + nbits - 1, lsb);
256 }
257
258 void rf(Register r, int lsb) {
259 f(r->encoding_nocheck(), lsb + 4, lsb);
260 }
261
262 // reg|ZR
263 void zrf(Register r, int lsb) {
264 f(r->encoding_nocheck() - (r == zr), lsb + 4, lsb);
265 }
266
267 // reg|SP
268 void srf(Register r, int lsb) {
269 f(r == sp ? 31 : r->encoding_nocheck(), lsb + 4, lsb);
270 }
// Operand-extend shorthands: each pins the 3-bit hardware extend
// encoding and the matching ext kind; a shift of -1 means unspecified.
class uxtw : public extend {
 public:
  uxtw(int shift = -1): extend(shift, 0b010, ext::uxtw) { }
};
class lsl : public extend {
 public:
  // LSL is encoded as UXTX with an explicit shift.
  lsl(int shift = -1): extend(shift, 0b011, ext::uxtx) { }
};
class sxtw : public extend {
 public:
  sxtw(int shift = -1): extend(shift, 0b110, ext::sxtw) { }
};
class sxtx : public extend {
 public:
  sxtx(int shift = -1): extend(shift, 0b111, ext::sxtx) { }
};

 private:
  Register _base;
  Register _index;
  long _offset;
  enum mode _mode;  // which addressing form this Address represents
  extend _ext;      // extend/shift applied to _index, when used

  RelocationHolder _rspec;

  // Typically we use AddressLiterals we want to use their rval
  // However in some situations we want the lval (effect address) of
  // the item. We provide a special factory for making those lvals.
  bool _is_lval;

  // If the target is far we'll need to load the ea of this to a
  // register to reach it. Otherwise if near we can do PC-relative
  // addressing.
  address _target;
 public:
  // Constructors for each addressing form.
  // NOTE(review): not every constructor initializes every member (the
  // no_mode and Pre forms leave _target unset; _is_lval is only set by
  // the literal form) — confirm readers only touch fields valid for the
  // constructed mode.
  Address()
    : _mode(no_mode) { }
  Address(Register r)
    : _base(r), _index(noreg), _offset(0), _mode(base_plus_offset), _target(0) { }
  Address(Register r, int o)
    : _base(r), _index(noreg), _offset(o), _mode(base_plus_offset), _target(0) { }
  Address(Register r, long o)
    : _base(r), _index(noreg), _offset(o), _mode(base_plus_offset), _target(0) { }
  Address(Register r, unsigned long o)
    : _base(r), _index(noreg), _offset(o), _mode(base_plus_offset), _target(0) { }
#ifdef ASSERT
  Address(Register r, ByteSize disp)
    : _base(r), _index(noreg), _offset(in_bytes(disp)), _mode(base_plus_offset), _target(0) { }
#endif
  // Base register plus extended/shifted index register.
  Address(Register r, Register r1, extend ext = lsl())
    : _base(r), _index(r1), _offset(0), _mode(base_plus_offset_reg),
      _ext(ext), _target(0) { }
  Address(Pre p)
    : _base(p.reg()), _offset(p.offset()), _mode(pre) { }
  Address(Post p)
    : _base(p.reg()), _index(p.idx_reg()), _offset(p.offset()),
      _mode(p.is_postreg() ? post_reg : post), _target(0) { }
  Address(address target, RelocationHolder const& rspec)
    : _mode(literal),
      _rspec(rspec),
      _is_lval(false),
      _target(target) { }
  Address(address target, relocInfo::relocType rtype = relocInfo::external_word_type);
  // RegisterOrConstant: use the register as index when present,
  // otherwise fold the constant (scaled by the extend's shift) into the
  // plain offset form.
  Address(Register base, RegisterOrConstant index, extend ext = lsl())
    : _base (base),
      _offset(0), _ext(ext), _target(0) {
    if (index.is_register()) {
      _mode = base_plus_offset_reg;
      _index = index.as_register();
    } else {
      guarantee(ext.option() == ext::uxtx, "should be");
      assert(index.is_constant(), "should be");
      _mode = base_plus_offset;
      _offset = index.as_constant() << ext.shift();
    }
  }
418
419 Register base() const {
420 guarantee((_mode == base_plus_offset | _mode == base_plus_offset_reg
421 | _mode == post | _mode == post_reg),
422 "wrong mode");
423 return _base;
424 }
long offset() const {
  return _offset;
}
Register index() const {
  return _index;
}
mode getMode() const {
  return _mode;
}
// True if reg is used as the base or the index of this address.
bool uses(Register reg) const { return _base == reg || _index == reg; }
address target() const { return _target; }
const RelocationHolder& rspec() const { return _rspec; }

// Encode this address into the instruction being assembled.
void encode(Instruction_aarch64 *i) const {
  i->f(0b111, 29, 27);
  i->srf(_base, 5);

  switch(_mode) {
  case base_plus_offset:
    {
    unsigned size = i->get(31, 30);
// NOTE(review): the listing elides a large span here; the lines below
// appear to be the tail of a different (pair-encoding) method that
// scales the offset by the access size.
size = 4 << size;
guarantee(_offset % size == 0, "bad offset");
i->sf(_offset / size, 21, 15);
i->srf(_base, 5);
}
542
543 void encode_nontemporal_pair(Instruction_aarch64 *i) const {
544 // Only base + offset is allowed
545 i->f(0b000, 25, 23);
546 unsigned size = i->get(31, 31);
547 size = 4 << size;
548 guarantee(_offset % size == 0, "bad offset");
549 i->sf(_offset / size, 21, 15);
550 i->srf(_base, 5);
551 guarantee(_mode == Address::base_plus_offset,
552 "Bad addressing mode for non-temporal op");
553 }
554
555 void lea(MacroAssembler *, Register) const;
556
557 static bool offset_ok_for_immed(long offset, int shift) {
558 unsigned mask = (1 << shift) - 1;
559 if (offset < 0 || offset & mask) {
560 return (uabs(offset) < (1 << (20 - 12))); // Unscaled offset
561 } else {
562 return ((offset >> shift) < (1 << (21 - 10 + 1))); // Scaled, unsigned offset
563 }
564 }
565 };
566
// Convenience classes: Addresses pre-bound to a specific reloc type.
class RuntimeAddress: public Address {

 public:

  RuntimeAddress(address target) : Address(target, relocInfo::runtime_call_type) {}

};

class OopAddress: public Address {
// NOTE(review): the body of OopAddress is elided from this listing.

class InternalAddress: public Address {

 public:

  InternalAddress(address target) : Address(target, relocInfo::internal_word_type) {}
};

// Size of the FP register save area, in words.
const int FPUStateSizeInWords = FloatRegisterImpl::number_of_registers *
                                FloatRegisterImpl::save_slots_per_register;

// PRFM prefetch operations: PLD = prefetch for load, PST = for store,
// PLI = instruction prefetch; L1-L3 select the cache level; KEEP vs
// STRM selects temporal vs streaming (use-once) policy.
typedef enum {
  PLDL1KEEP = 0b00000, PLDL1STRM, PLDL2KEEP, PLDL2STRM, PLDL3KEEP, PLDL3STRM,
  PSTL1KEEP = 0b10000, PSTL1STRM, PSTL2KEEP, PSTL2STRM, PSTL3KEEP, PSTL3STRM,
  PLIL1KEEP = 0b01000, PLIL1STRM, PLIL2KEEP, PLIL2STRM, PLIL3KEEP, PLIL3STRM
} prfop;
615
616 class Assembler : public AbstractAssembler {
617
618 #ifndef PRODUCT
619 static const unsigned long asm_bp;
620
621 void emit_long(jint x) {
622 if ((unsigned long)pc() == asm_bp)
623 asm volatile ("nop");
624 AbstractAssembler::emit_int32(x);
625 }
626 #else
627 void emit_long(jint x) {
628 AbstractAssembler::emit_int32(x);
629 }
630 #endif
631
 public:

  enum { instruction_size = 4 };  // AArch64 instructions are fixed-width

  //---< calculate length of instruction >---
  // We just use the values set above.
  // instruction must start at passed address
  static unsigned int instr_len(unsigned char *instr) { return instruction_size; }

  //---< longest instructions >---
  static unsigned int instr_maxlen() { return instruction_size; }
643
644 Address adjust(Register base, int offset, bool preIncrement) {
645 if (preIncrement)
646 return Address(Pre(base, offset));
647 else
648 return Address(Post(base, offset));
649 }
650
// Pre-indexed address: base is updated before the access.
Address pre(Register base, int offset) {
  return adjust(base, offset, true);
}

// Post-indexed address: base is updated after the access.
Address post(Register base, int offset) {
  return adjust(base, offset, false);
}

// Post-indexed address with a register increment.
Address post(Register base, Register idx) {
  return Address(Post(base, idx));
}

// The instruction currently under construction; the field setters below
// all forward to it.
Instruction_aarch64* current;

void set_current(Instruction_aarch64* i) { current = i; }

void f(unsigned val, int msb, int lsb) {
  current->f(val, msb, lsb);
}
void f(unsigned val, int msb) {
  current->f(val, msb, msb);
}
void sf(long val, int msb, int lsb) {
  current->sf(val, msb, lsb);
}
void rf(Register reg, int lsb) {
  current->rf(reg, lsb);
}
void srf(Register reg, int lsb) {
  current->srf(reg, lsb);
}
void zrf(Register reg, int lsb) {
  current->zrf(reg, lsb);
}
void rf(FloatRegister reg, int lsb) {
  current->rf(reg, lsb);
}
void fixed(unsigned value, unsigned mask) {
  current->fixed(value, mask);
}
691
692 void emit() {
693 emit_long(current->get_insn());
// Bind a not-yet-resolved Label to the given branch/prefetch emitter.
void wrap_label(Label &L, uncond_branch_insn insn);
void wrap_label(Register r, Label &L, compare_and_branch_insn insn);
void wrap_label(Register r, int bitpos, Label &L, test_and_branch_insn insn);
void wrap_label(Label &L, prfop, prefetch_insn insn);

// PC-rel. addressing

void adr(Register Rd, address dest);
void _adrp(Register Rd, address dest);

void adr(Register Rd, const Address &dest);
void _adrp(Register Rd, const Address &dest);
715
716 void adr(Register Rd, Label &L) {
717 wrap_label(Rd, L, &Assembler::Assembler::adr);
718 }
// Label form of _adrp; resolved through wrap_label.
void _adrp(Register Rd, Label &L) {
  wrap_label(Rd, L, &Assembler::_adrp);
}

// NOTE(review): presumably emits an adrp and returns the remaining
// low-order displacement in `offset` — confirm against the definition.
void adrp(Register Rd, const Address &dest, unsigned long &offset);

#undef INSN

void add_sub_immediate(Register Rd, Register Rn, unsigned uimm, int op,
                       int negated_op);
729
// Add/subtract (immediate)
// Each mnemonic gets an explicit (imm12, shift) encoder plus an
// unshifted form routed through add_sub_immediate, which may use the
// negated opcode.
#define INSN(NAME, decode, negated) \
void NAME(Register Rd, Register Rn, unsigned imm, unsigned shift) { \
  starti; \
  f(decode, 31, 29), f(0b10001, 28, 24), f(shift, 23, 22), f(imm, 21, 10); \
  zrf(Rd, 0), srf(Rn, 5); \
} \
\
void NAME(Register Rd, Register Rn, unsigned imm) { \
  starti; \
  add_sub_immediate(Rd, Rn, imm, decode, negated); \
}

INSN(addsw, 0b001, 0b011);
// NOTE(review): the remaining INSN invocations of this family are
// elided from this listing.
#undef INSN

// Extract (EXTR/EXTRW); imms is the rotate amount, limited to < 32 for
// the 32-bit form.
#define INSN(NAME, opcode, size) \
void NAME(Register Rd, Register Rn, Register Rm, unsigned imms) { \
  starti; \
  guarantee(size == 1 || imms < 32, "incorrect imms"); \
  f(opcode, 31, 21), f(imms, 15, 10); \
  zrf(Rm, 16), zrf(Rn, 5), zrf(Rd, 0); \
}

INSN(extrw, 0b00010011100, 0);
INSN(extr, 0b10010011110, 1);

#undef INSN
844
// The maximum range of a branch is fixed for the AArch64
// architecture. In debug mode we shrink it in order to test
// trampolines, but not so small that branches in the interpreter
// are out of range.
static const unsigned long branch_range = NOT_DEBUG(128 * M) DEBUG_ONLY(2 * M);

static bool reachable_from_branch_at(address branch, address target) {
  return uabs(target - branch) < branch_range;
}

// Unconditional branch (immediate): 26-bit signed word offset.
#define INSN(NAME, opcode) \
void NAME(address dest) { \
  starti; \
  long offset = (dest - pc()) >> 2; \
  DEBUG_ONLY(assert(reachable_from_branch_at(pc(), dest), "debug only")); \
  f(opcode, 31), f(0b00101, 30, 26), sf(offset, 25, 0); \
} \
void NAME(Label &L) { \
  wrap_label(L, &Assembler::NAME); \
} \
void NAME(const Address &dest);

INSN(b, 0);
INSN(bl, 1);

#undef INSN

// Compare & branch (immediate): 19-bit signed word offset.
#define INSN(NAME, opcode) \
void NAME(Register Rt, address dest) { \
  long offset = (dest - pc()) >> 2; \
  starti; \
  f(opcode, 31, 24), sf(offset, 23, 5), rf(Rt, 0); \
} \
void NAME(Register Rt, Label &L) { \
  wrap_label(Rt, L, &Assembler::NAME); \
}

INSN(cbzw, 0b00110100);
INSN(cbnzw, 0b00110101);
INSN(cbz, 0b10110100);
INSN(cbnz, 0b10110101);

#undef INSN

// Test & branch (immediate): bit number is split into b5 (insn bit 31)
// and the low five bits (23:19); 14-bit signed word offset.
#define INSN(NAME, opcode) \
void NAME(Register Rt, int bitpos, address dest) { \
  long offset = (dest - pc()) >> 2; \
  int b5 = bitpos >> 5; \
  bitpos &= 0x1f; \
  starti; \
  f(b5, 31), f(opcode, 30, 24), f(bitpos, 23, 19), sf(offset, 18, 5); \
  rf(Rt, 0); \
} \
void NAME(Register Rt, int bitpos, Label &L) { \
  wrap_label(Rt, bitpos, L, &Assembler::NAME); \
}

INSN(tbz, 0b0110110);
INSN(tbnz, 0b0110111);

#undef INSN
909
// Conditional branch (immediate)
// Enumerators are emitted directly as the 4-bit cond field (see br()).
enum Condition
  {EQ, NE, HS, CS=HS, LO, CC=LO, MI, PL, VS, VC, HI, LS, GE, LT, GT, LE, AL, NV};

// B.cond with a 19-bit signed word offset.
void br(Condition cond, address dest) {
  long offset = (dest - pc()) >> 2;
  starti;
  f(0b0101010, 31, 25), f(0, 24), sf(offset, 23, 5), f(0, 4), f(cond, 3, 0);
}

// Mnemonic aliases, one per condition code.
#define INSN(NAME, cond) \
void NAME(address dest) { \
  br(cond, dest); \
}

INSN(beq, EQ);
INSN(bne, NE);
INSN(bhs, HS);
INSN(bcs, CS);
INSN(blo, LO);
INSN(bcc, CC);
INSN(bmi, MI);
INSN(bpl, PL);
INSN(bvs, VS);
INSN(bvc, VC);
INSN(bhi, HI);
// NOTE(review): the listing elides the remaining aliases and the head
// of the LSE-atomics INSN macro whose tail follows.
void NAME_L(operand_size sz, Register Rs, Register Rt, Register Rn) { \
  lse_atomic(Rs, Rt, Rn, sz, op1, op2, false, true); \
} \
void NAME_AL(operand_size sz, Register Rs, Register Rt, Register Rn) {\
  lse_atomic(Rs, Rt, Rn, sz, op1, op2, true, true); \
}
INSN(ldadd, ldadda, ldaddl, ldaddal, 0, 0b000);
INSN(ldbic, ldbica, ldbicl, ldbical, 0, 0b001);
INSN(ldeor, ldeora, ldeorl, ldeoral, 0, 0b010);
INSN(ldorr, ldorra, ldorrl, ldorral, 0, 0b011);
INSN(ldsmax, ldsmaxa, ldsmaxl, ldsmaxal, 0, 0b100);
INSN(ldsmin, ldsmina, ldsminl, ldsminal, 0, 0b101);
INSN(ldumax, ldumaxa, ldumaxl, ldumaxal, 0, 0b110);
INSN(ldumin, ldumina, lduminl, lduminal, 0, 0b111);
INSN(swp, swpa, swpl, swpal, 1, 0b000);
#undef INSN
1291
// Load register (literal): PC-relative load, 19-bit signed word offset.
#define INSN(NAME, opc, V) \
void NAME(Register Rt, address dest) { \
  long offset = (dest - pc()) >> 2; \
  starti; \
  f(opc, 31, 30), f(0b011, 29, 27), f(V, 26), f(0b00, 25, 24), \
  sf(offset, 23, 5); \
  rf(Rt, 0); \
} \
void NAME(Register Rt, address dest, relocInfo::relocType rtype) { \
  InstructionMark im(this); \
  guarantee(rtype == relocInfo::internal_word_type, \
            "only internal_word_type relocs make sense here"); \
  code_section()->relocate(inst_mark(), InternalAddress(dest).rspec()); \
  NAME(Rt, dest); \
} \
void NAME(Register Rt, Label &L) { \
  wrap_label(Rt, L, &Assembler::NAME); \
}

INSN(ldrw, 0b00, 0);
INSN(ldr, 0b01, 0);
INSN(ldrsw, 0b10, 0);

#undef INSN

// Literal loads into FP/SIMD registers (V == 1).
#define INSN(NAME, opc, V) \
void NAME(FloatRegister Rt, address dest) { \
  long offset = (dest - pc()) >> 2; \
  starti; \
  f(opc, 31, 30), f(0b011, 29, 27), f(V, 26), f(0b00, 25, 24), \
  sf(offset, 23, 5); \
  rf((Register)Rt, 0); \
}

INSN(ldrs, 0b00, 1);
INSN(ldrd, 0b01, 1);
INSN(ldrq, 0b10, 1);

#undef INSN

// PC-relative prefetch (PRFM literal form).
#define INSN(NAME, opc, V) \
void NAME(address dest, prfop op = PLDL1KEEP) { \
  long offset = (dest - pc()) >> 2; \
  starti; \
  f(opc, 31, 30), f(0b011, 29, 27), f(V, 26), f(0b00, 25, 24), \
  sf(offset, 23, 5); \
  f(op, 4, 0); \
} \
void NAME(Label &L, prfop op = PLDL1KEEP) { \
  wrap_label(L, op, &Assembler::NAME); \
}

INSN(prfm, 0b11, 0);

#undef INSN
1348
// Load/store pair: common field layout for the pair forms.
void ld_st1(int opc, int p1, int V, int L,
            Register Rt1, Register Rt2, Address adr, bool no_allocate) {
  starti;
  f(opc, 31, 30), f(p1, 29, 27), f(V, 26), f(L, 22);
  zrf(Rt2, 10), zrf(Rt1, 0);
  if (no_allocate) {
// NOTE(review): the rest of ld_st1 and the head of this INSN family are
// elided from this listing.
INSN(stpq, 0b10, 0b101, 1, 0, false);
INSN(ldpq, 0b10, 0b101, 1, 1, false);

#undef INSN
1395
// Load/store register (all modes)
// Encodes the PC-literal form inline and delegates every other
// addressing mode to Address::encode.
void ld_st2(Register Rt, const Address &adr, int size, int op, int V = 0) {
  starti;

  f(V, 26); // general reg?
  zrf(Rt, 0);

  // Encoding for literal loads is done here (rather than pushed
  // down into Address::encode) because the encoding of this
  // instruction is too different from all of the other forms to
  // make it worth sharing.
  if (adr.getMode() == Address::literal) {
    assert(size == 0b10 || size == 0b11, "bad operand size in ldr");
    assert(op == 0b01, "literal form can only be used with loads");
    f(size & 0b01, 31, 30), f(0b011, 29, 27), f(0b00, 25, 24);
    long offset = (adr.target() - pc()) >> 2;
    sf(offset, 23, 5);
    code_section()->relocate(pc(), adr.rspec());
    return;
  }

  f(size, 31, 30);
  f(op, 23, 22); // str
  adr.encode(current);
}
1421
// Store-register mnemonics, one per operand size.
#define INSN(NAME, size, op) \
void NAME(Register Rt, const Address &adr) { \
  ld_st2(Rt, adr, size, op); \
} \

INSN(str, 0b11, 0b00);
INSN(strw, 0b10, 0b00);
INSN(strb, 0b00, 0b00);
INSN(strh, 0b01, 0b00);

// NOTE(review): the listing elides several instruction families here;
// the lines below are the tail of the logical-ops (shifted register)
// macro.
\
/* These instructions have no immediate form. Provide an overload so \
   that if anyone does try to use an immediate operand -- this has \
   happened! -- we'll get a compile-time error. */ \
void NAME(Register Rd, Register Rn, unsigned imm, \
          enum shift_kind kind = LSL, unsigned shift = 0) { \
  assert(false, " can't be used with immediate operand"); \
}

INSN(bic, 1, 0b00, 1);
INSN(orn, 1, 0b01, 1);
INSN(eon, 1, 0b10, 1);
INSN(bics, 1, 0b11, 1);
INSN(bicw, 0, 0b00, 1);
INSN(ornw, 0, 0b01, 1);
INSN(eonw, 0, 0b10, 1);
INSN(bicsw, 0, 0b11, 1);

#undef INSN

// Aliases for short forms of orn
void mvn(Register Rd, Register Rm,
         enum shift_kind kind = LSL, unsigned shift = 0) {
  orn(Rd, zr, Rm, kind, shift);
}

void mvnw(Register Rd, Register Rm,
          enum shift_kind kind = LSL, unsigned shift = 0) {
  ornw(Rd, zr, Rm, kind, shift);
}
1542
// Add/subtract (shifted register)
// NOTE(review): this macro's body continues past an elision in the
// listing; the trailing unprefixed lines belong to a SIMD table/element
// encoder whose head is not visible here.
#define INSN(NAME, size, op) \
void NAME(Register Rd, Register Rn, Register Rm, \
          enum shift_kind kind, unsigned shift = 0) { \
  starti; \
  f(0, 21); \
  assert_cond(kind != ROR); \
  guarantee(size == 1 || shift < 32, "incorrect shift");\
  zrf(Rd, 0), zrf(Rn, 5), zrf(Rm, 16); \
assert((T == T8B && index <= 0b0111) || (T == T16B && index <= 0b1111), "Invalid index value");
f(0, 31), f((int)T & 1, 30), f(0b101110000, 29, 21);
rf(Vm, 16), f(0, 15), f(index, 14, 11);
f(0, 10), rf(Vn, 5), rf(Vd, 0);
}
2671
Assembler(CodeBuffer* code) : AbstractAssembler(code) {
}

// Not used on this port; calling it is an error.
virtual RegisterOrConstant delayed_value_impl(intptr_t* delayed_value_addr,
                                             Register tmp,
                                             int offset) {
  ShouldNotCallThis();
  return RegisterOrConstant();
}

// Stack overflow checking
virtual void bang_stack_with_offset(int offset);

// Validity tests for the corresponding immediate-operand forms.
static bool operand_valid_for_logical_immediate(bool is32, uint64_t imm);
static bool operand_valid_for_add_sub_immediate(long imm);
static bool operand_valid_for_float_immediate(double imm);

void emit_data64(jlong data, relocInfo::relocType rtype, int format = 0);
void emit_data64(jlong data, RelocationHolder const& rspec, int format = 0);
};
2692
2693 inline Assembler::Membar_mask_bits operator|(Assembler::Membar_mask_bits a,
2694 Assembler::Membar_mask_bits b) {
2695 return Assembler::Membar_mask_bits(unsigned(a)|unsigned(b));
2696 }
2697
// Destroying an Instruction_aarch64 emits it into its owning assembler,
// so a fully-constructed instruction is flushed at end of scope.
Instruction_aarch64::~Instruction_aarch64() {
  assem->emit();
}
2701
2702 #undef starti
2703
2704 // Invert a condition
2705 inline const Assembler::Condition operator~(const Assembler::Condition cond) {
2706 return Assembler::Condition(int(cond) ^ 1);
|
182
183 u = val << (31 - hi);
184 n = n >> (31 - hi + lo);
185 return n;
186 }
187
188 static inline uint32_t extract(uint32_t val, int msb, int lsb) {
189 int nbits = msb - lsb + 1;
190 assert_cond(msb >= lsb);
191 uint32_t mask = (1U << nbits) - 1;
192 uint32_t result = val >> lsb;
193 result &= mask;
194 return result;
195 }
196
197 static inline int32_t sextract(uint32_t val, int msb, int lsb) {
198 uint32_t uval = extract(val, msb, lsb);
199 return extend(uval, msb - lsb);
200 }
201
202 static void patch(address a, int msb, int lsb, uint64_t val) {
203 int nbits = msb - lsb + 1;
204 guarantee(val < (1U << nbits), "Field too big for insn");
205 assert_cond(msb >= lsb);
206 unsigned mask = (1U << nbits) - 1;
207 val <<= lsb;
208 mask <<= lsb;
209 unsigned target = *(unsigned *)a;
210 target &= ~mask;
211 target |= val;
212 *(unsigned *)a = target;
213 }
214
215 static void spatch(address a, int msb, int lsb, int64_t val) {
216 int nbits = msb - lsb + 1;
217 int64_t chk = val >> (nbits - 1);
218 guarantee (chk == -1 || chk == 0, "Field too big for insn");
219 unsigned uval = val;
220 unsigned mask = (1U << nbits) - 1;
221 uval &= mask;
222 uval <<= lsb;
223 mask <<= lsb;
224 unsigned target = *(unsigned *)a;
225 target &= ~mask;
226 target |= uval;
227 *(unsigned *)a = target;
228 }
229
230 void f(unsigned val, int msb, int lsb) {
231 int nbits = msb - lsb + 1;
232 guarantee(val < (1U << nbits), "Field too big for insn");
233 assert_cond(msb >= lsb);
234 unsigned mask = (1U << nbits) - 1;
235 val <<= lsb;
236 mask <<= lsb;
237 insn |= val;
238 assert_cond((bits & mask) == 0);
239 #ifdef ASSERT
240 bits |= mask;
241 #endif
242 }
243
// Set the single bit `bit` of the instruction to val.
void f(unsigned val, int bit) {
  f(val, bit, bit);
}
247
248 void sf(int64_t val, int msb, int lsb) {
249 int nbits = msb - lsb + 1;
250 int64_t chk = val >> (nbits - 1);
251 guarantee (chk == -1 || chk == 0, "Field too big for insn");
252 unsigned uval = val;
253 unsigned mask = (1U << nbits) - 1;
254 uval &= mask;
255 f(uval, lsb + nbits - 1, lsb);
256 }
257
258 void rf(Register r, int lsb) {
259 f(r->encoding_nocheck(), lsb + 4, lsb);
260 }
261
262 // reg|ZR
263 void zrf(Register r, int lsb) {
264 f(r->encoding_nocheck() - (r == zr), lsb + 4, lsb);
265 }
266
267 // reg|SP
268 void srf(Register r, int lsb) {
269 f(r == sp ? 31 : r->encoding_nocheck(), lsb + 4, lsb);
270 }
// Operand-extend shorthands: each pins the 3-bit hardware extend
// encoding and the matching ext kind; a shift of -1 means unspecified.
class uxtw : public extend {
 public:
  uxtw(int shift = -1): extend(shift, 0b010, ext::uxtw) { }
};
class lsl : public extend {
 public:
  // LSL is encoded as UXTX with an explicit shift.
  lsl(int shift = -1): extend(shift, 0b011, ext::uxtx) { }
};
class sxtw : public extend {
 public:
  sxtw(int shift = -1): extend(shift, 0b110, ext::sxtw) { }
};
class sxtx : public extend {
 public:
  sxtx(int shift = -1): extend(shift, 0b111, ext::sxtx) { }
};

 private:
  Register _base;
  Register _index;
  int64_t _offset;
  enum mode _mode;  // which addressing form this Address represents
  extend _ext;      // extend/shift applied to _index, when used

  RelocationHolder _rspec;

  // Typically we use AddressLiterals we want to use their rval
  // However in some situations we want the lval (effect address) of
  // the item. We provide a special factory for making those lvals.
  bool _is_lval;

  // If the target is far we'll need to load the ea of this to a
  // register to reach it. Otherwise if near we can do PC-relative
  // addressing.
  address _target;
 public:
  // Constructors for each addressing form.
  // NOTE(review): not every constructor initializes every member (the
  // no_mode and Pre forms leave _target unset; _is_lval is only set by
  // the literal form) — confirm readers only touch fields valid for the
  // constructed mode.
  Address()
    : _mode(no_mode) { }
  Address(Register r)
    : _base(r), _index(noreg), _offset(0), _mode(base_plus_offset), _target(0) { }
  Address(Register r, int o)
    : _base(r), _index(noreg), _offset(o), _mode(base_plus_offset), _target(0) { }
  Address(Register r, int64_t o)
    : _base(r), _index(noreg), _offset(o), _mode(base_plus_offset), _target(0) { }
  Address(Register r, uint64_t o)
    : _base(r), _index(noreg), _offset(o), _mode(base_plus_offset), _target(0) { }
#ifdef ASSERT
  Address(Register r, ByteSize disp)
    : _base(r), _index(noreg), _offset(in_bytes(disp)), _mode(base_plus_offset), _target(0) { }
#endif
  // Base register plus extended/shifted index register.
  Address(Register r, Register r1, extend ext = lsl())
    : _base(r), _index(r1), _offset(0), _mode(base_plus_offset_reg),
      _ext(ext), _target(0) { }
  Address(Pre p)
    : _base(p.reg()), _offset(p.offset()), _mode(pre) { }
  Address(Post p)
    : _base(p.reg()), _index(p.idx_reg()), _offset(p.offset()),
      _mode(p.is_postreg() ? post_reg : post), _target(0) { }
  Address(address target, RelocationHolder const& rspec)
    : _mode(literal),
      _rspec(rspec),
      _is_lval(false),
      _target(target) { }
  Address(address target, relocInfo::relocType rtype = relocInfo::external_word_type);
  // RegisterOrConstant: use the register as index when present,
  // otherwise fold the constant (scaled by the extend's shift) into the
  // plain offset form.
  Address(Register base, RegisterOrConstant index, extend ext = lsl())
    : _base (base),
      _offset(0), _ext(ext), _target(0) {
    if (index.is_register()) {
      _mode = base_plus_offset_reg;
      _index = index.as_register();
    } else {
      guarantee(ext.option() == ext::uxtx, "should be");
      assert(index.is_constant(), "should be");
      _mode = base_plus_offset;
      _offset = index.as_constant() << ext.shift();
    }
  }
418
419 Register base() const {
420 guarantee((_mode == base_plus_offset | _mode == base_plus_offset_reg
421 | _mode == post | _mode == post_reg),
422 "wrong mode");
423 return _base;
424 }
int64_t offset() const {
  return _offset;
}
Register index() const {
  return _index;
}
mode getMode() const {
  return _mode;
}
// True if reg is used as the base or the index of this address.
bool uses(Register reg) const { return _base == reg || _index == reg; }
address target() const { return _target; }
const RelocationHolder& rspec() const { return _rspec; }

// Encode this address into the instruction being assembled.
void encode(Instruction_aarch64 *i) const {
  i->f(0b111, 29, 27);
  i->srf(_base, 5);

  switch(_mode) {
  case base_plus_offset:
    {
    unsigned size = i->get(31, 30);
// NOTE(review): the listing elides a large span here; the lines below
// appear to be the tail of a different (pair-encoding) method that
// scales the offset by the access size.
size = 4 << size;
guarantee(_offset % size == 0, "bad offset");
i->sf(_offset / size, 21, 15);
i->srf(_base, 5);
}
542
543 void encode_nontemporal_pair(Instruction_aarch64 *i) const {
544 // Only base + offset is allowed
545 i->f(0b000, 25, 23);
546 unsigned size = i->get(31, 31);
547 size = 4 << size;
548 guarantee(_offset % size == 0, "bad offset");
549 i->sf(_offset / size, 21, 15);
550 i->srf(_base, 5);
551 guarantee(_mode == Address::base_plus_offset,
552 "Bad addressing mode for non-temporal op");
553 }
554
555 void lea(MacroAssembler *, Register) const;
556
557 static bool offset_ok_for_immed(int64_t offset, int shift) {
558 unsigned mask = (1 << shift) - 1;
559 if (offset < 0 || offset & mask) {
560 return (uabs(offset) < (1 << (20 - 12))); // Unscaled offset
561 } else {
562 return ((offset >> shift) < (1 << (21 - 10 + 1))); // Scaled, unsigned offset
563 }
564 }
565 };
566
// Convenience classes: Addresses pre-bound to a specific reloc type.
class RuntimeAddress: public Address {

 public:

  RuntimeAddress(address target) : Address(target, relocInfo::runtime_call_type) {}

};

class OopAddress: public Address {
// NOTE(review): the body of OopAddress is elided from this listing.

class InternalAddress: public Address {

 public:

  InternalAddress(address target) : Address(target, relocInfo::internal_word_type) {}
};

// Size of the FP register save area, in words.
const int FPUStateSizeInWords = FloatRegisterImpl::number_of_registers *
                                FloatRegisterImpl::save_slots_per_register;

// PRFM prefetch operations: PLD = prefetch for load, PST = for store,
// PLI = instruction prefetch; L1-L3 select the cache level; KEEP vs
// STRM selects temporal vs streaming (use-once) policy.
typedef enum {
  PLDL1KEEP = 0b00000, PLDL1STRM, PLDL2KEEP, PLDL2STRM, PLDL3KEEP, PLDL3STRM,
  PSTL1KEEP = 0b10000, PSTL1STRM, PSTL2KEEP, PSTL2STRM, PSTL3KEEP, PSTL3STRM,
  PLIL1KEEP = 0b01000, PLIL1STRM, PLIL2KEEP, PLIL2STRM, PLIL3KEEP, PLIL3STRM
} prfop;
615
616 class Assembler : public AbstractAssembler {
617
#ifndef PRODUCT
  // Breakpoint address for debugging the assembler itself.
  static const uint64_t asm_bp;

  // Emit one 32-bit instruction word; when pc() matches asm_bp a nop is
  // executed first, giving a place to plant a hardware breakpoint.
  void emit_long(jint x) {
    if ((uint64_t)pc() == asm_bp) {
#ifdef _WIN64
      // MSVC built-in: https://docs.microsoft.com/en-us/cpp/intrinsics/arm64-intrinsics?view=vs-2019#I
      __nop();
#else
      asm volatile ("nop");
#endif
    }
    AbstractAssembler::emit_int32(x);
  }
#else
  void emit_long(jint x) {
    AbstractAssembler::emit_int32(x);
  }
#endif
637
 public:

  enum { instruction_size = 4 };  // AArch64 instructions are fixed-width

  //---< calculate length of instruction >---
  // We just use the values set above.
  // instruction must start at passed address
  static unsigned int instr_len(unsigned char *instr) { return instruction_size; }

  //---< longest instructions >---
  static unsigned int instr_maxlen() { return instruction_size; }
649
650 Address adjust(Register base, int offset, bool preIncrement) {
651 if (preIncrement)
652 return Address(Pre(base, offset));
653 else
654 return Address(Post(base, offset));
655 }
656
// Pre-indexed address: base is updated before the access.
Address pre(Register base, int offset) {
  return adjust(base, offset, true);
}

// Post-indexed address: base is updated after the access.
Address post(Register base, int offset) {
  return adjust(base, offset, false);
}

// Post-indexed address with a register increment.
Address post(Register base, Register idx) {
  return Address(Post(base, idx));
}

static address locate_next_instruction(address inst);

// The instruction currently under construction; the field setters below
// all forward to it.
Instruction_aarch64* current;

void set_current(Instruction_aarch64* i) { current = i; }

void f(unsigned val, int msb, int lsb) {
  current->f(val, msb, lsb);
}
void f(unsigned val, int msb) {
  current->f(val, msb, msb);
}
void sf(int64_t val, int msb, int lsb) {
  current->sf(val, msb, lsb);
}
void rf(Register reg, int lsb) {
  current->rf(reg, lsb);
}
void srf(Register reg, int lsb) {
  current->srf(reg, lsb);
}
void zrf(Register reg, int lsb) {
  current->zrf(reg, lsb);
}
void rf(FloatRegister reg, int lsb) {
  current->rf(reg, lsb);
}
void fixed(unsigned value, unsigned mask) {
  current->fixed(value, mask);
}
699
700 void emit() {
701 emit_long(current->get_insn());
// Bind a not-yet-resolved Label to the given branch/prefetch emitter.
void wrap_label(Label &L, uncond_branch_insn insn);
void wrap_label(Register r, Label &L, compare_and_branch_insn insn);
void wrap_label(Register r, int bitpos, Label &L, test_and_branch_insn insn);
void wrap_label(Label &L, prfop, prefetch_insn insn);

// PC-rel. addressing

void adr(Register Rd, address dest);
void _adrp(Register Rd, address dest);

void adr(Register Rd, const Address &dest);
void _adrp(Register Rd, const Address &dest);
723
724 void adr(Register Rd, Label &L) {
725 wrap_label(Rd, L, &Assembler::Assembler::adr);
726 }
// Label form of _adrp; resolved through wrap_label.
void _adrp(Register Rd, Label &L) {
  wrap_label(Rd, L, &Assembler::_adrp);
}

// NOTE(review): presumably emits an adrp and returns the remaining
// low-order displacement in `offset` — confirm against the definition.
void adrp(Register Rd, const Address &dest, uint64_t &offset);

#undef INSN

void add_sub_immediate(Register Rd, Register Rn, unsigned uimm, int op,
                       int negated_op);
737
// Add/subtract (immediate)
// Each mnemonic gets an explicit (imm12, shift) encoder plus an
// unshifted form routed through add_sub_immediate, which may use the
// negated opcode.
#define INSN(NAME, decode, negated) \
void NAME(Register Rd, Register Rn, unsigned imm, unsigned shift) { \
  starti; \
  f(decode, 31, 29), f(0b10001, 28, 24), f(shift, 23, 22), f(imm, 21, 10); \
  zrf(Rd, 0), srf(Rn, 5); \
} \
\
void NAME(Register Rd, Register Rn, unsigned imm) { \
  starti; \
  add_sub_immediate(Rd, Rn, imm, decode, negated); \
}

INSN(addsw, 0b001, 0b011);
// NOTE(review): the remaining INSN invocations of this family are
// elided from this listing.
#undef INSN

// Extract (EXTR/EXTRW); imms is the rotate amount, limited to < 32 for
// the 32-bit form.
#define INSN(NAME, opcode, size) \
void NAME(Register Rd, Register Rn, Register Rm, unsigned imms) { \
  starti; \
  guarantee(size == 1 || imms < 32, "incorrect imms"); \
  f(opcode, 31, 21), f(imms, 15, 10); \
  zrf(Rm, 16), zrf(Rn, 5), zrf(Rd, 0); \
}

INSN(extrw, 0b00010011100, 0);
INSN(extr, 0b10010011110, 1);

#undef INSN
852
853 // The maximum range of a branch is fixed for the AArch64
854 // architecture. In debug mode we shrink it in order to test
855 // trampolines, but not so small that branches in the interpreter
856 // are out of range.
857 static const uint64_t branch_range = NOT_DEBUG(128 * M) DEBUG_ONLY(2 * M);
858
859 static bool reachable_from_branch_at(address branch, address target) {
860 return uabs(target - branch) < branch_range;
861 }
862
863 // Unconditional branch (immediate)
864 #define INSN(NAME, opcode) \
865 void NAME(address dest) { \
866 starti; \
867 int64_t offset = (dest - pc()) >> 2; \
868 DEBUG_ONLY(assert(reachable_from_branch_at(pc(), dest), "debug only")); \
869 f(opcode, 31), f(0b00101, 30, 26), sf(offset, 25, 0); \
870 } \
871 void NAME(Label &L) { \
872 wrap_label(L, &Assembler::NAME); \
873 } \
874 void NAME(const Address &dest);
875
876 INSN(b, 0);
877 INSN(bl, 1);
878
879 #undef INSN
880
  // Compare & branch (immediate): CBZ/CBNZ -- branch if Rt is
  // (non)zero.  19-bit signed word offset in bits 23..5, Rt in 4..0.
#define INSN(NAME, opcode)                                              \
  void NAME(Register Rt, address dest) {                                \
    int64_t offset = (dest - pc()) >> 2;                                \
    starti;                                                             \
    f(opcode, 31, 24), sf(offset, 23, 5), rf(Rt, 0);                    \
  }                                                                     \
  void NAME(Register Rt, Label &L) {                                    \
    wrap_label(Rt, L, &Assembler::NAME);                                \
  }
891
892 INSN(cbzw, 0b00110100);
893 INSN(cbnzw, 0b00110101);
894 INSN(cbz, 0b10110100);
895 INSN(cbnz, 0b10110101);
896
897 #undef INSN
898
  // Test bit & branch (immediate): TBZ/TBNZ.
  // bitpos is split across the encoding: bit 5 of bitpos (b5) goes to
  // instruction bit 31, the low five bits to bits 23..19; the target is
  // a 14-bit signed word offset in bits 18..5.
#define INSN(NAME, opcode)                                              \
  void NAME(Register Rt, int bitpos, address dest) {                    \
    int64_t offset = (dest - pc()) >> 2;                                \
    int b5 = bitpos >> 5;                                               \
    bitpos &= 0x1f;                                                     \
    starti;                                                             \
    f(b5, 31), f(opcode, 30, 24), f(bitpos, 23, 19), sf(offset, 18, 5); \
    rf(Rt, 0);                                                          \
  }                                                                     \
  void NAME(Register Rt, int bitpos, Label &L) {                        \
    wrap_label(Rt, bitpos, L, &Assembler::NAME);                        \
  }
912
913 INSN(tbz, 0b0110110);
914 INSN(tbnz, 0b0110111);
915
916 #undef INSN
917
  // Conditional branch (immediate)
  // AArch64 condition codes, listed in encoding order (EQ == 0b0000 ...
  // NV == 0b1111).  HS/CS and LO/CC are alternative mnemonics for the
  // same encodings.
  enum Condition
    {EQ, NE, HS, CS=HS, LO, CC=LO, MI, PL, VS, VC, HI, LS, GE, LT, GT, LE, AL, NV};
921
  // B.cond: conditional branch to dest, encoded as a 19-bit signed
  // word offset (bits 23..5) with the condition in bits 3..0.
  void br(Condition cond, address dest) {
    int64_t offset = (dest - pc()) >> 2;
    starti;
    f(0b0101010, 31, 25), f(0, 24), sf(offset, 23, 5), f(0, 4), f(cond, 3, 0);
  }
927
  // Per-condition mnemonic aliases (beq, bne, ...): each expands to
  // br(cond, dest).
#define INSN(NAME, cond)                                                \
  void NAME(address dest) {                                             \
    br(cond, dest);                                                     \
  }
932
933 INSN(beq, EQ);
934 INSN(bne, NE);
935 INSN(bhs, HS);
936 INSN(bcs, CS);
937 INSN(blo, LO);
938 INSN(bcc, CC);
939 INSN(bmi, MI);
940 INSN(bpl, PL);
941 INSN(bvs, VS);
942 INSN(bvc, VC);
943 INSN(bhi, HI);
1283 void NAME_L(operand_size sz, Register Rs, Register Rt, Register Rn) { \
1284 lse_atomic(Rs, Rt, Rn, sz, op1, op2, false, true); \
1285 } \
1286 void NAME_AL(operand_size sz, Register Rs, Register Rt, Register Rn) {\
1287 lse_atomic(Rs, Rt, Rn, sz, op1, op2, true, true); \
1288 }
1289 INSN(ldadd, ldadda, ldaddl, ldaddal, 0, 0b000);
1290 INSN(ldbic, ldbica, ldbicl, ldbical, 0, 0b001);
1291 INSN(ldeor, ldeora, ldeorl, ldeoral, 0, 0b010);
1292 INSN(ldorr, ldorra, ldorrl, ldorral, 0, 0b011);
1293 INSN(ldsmax, ldsmaxa, ldsmaxl, ldsmaxal, 0, 0b100);
1294 INSN(ldsmin, ldsmina, ldsminl, ldsminal, 0, 0b101);
1295 INSN(ldumax, ldumaxa, ldumaxl, ldumaxal, 0, 0b110);
1296 INSN(ldumin, ldumina, lduminl, lduminal, 0, 0b111);
1297 INSN(swp, swpa, swpl, swpal, 1, 0b000);
1298 #undef INSN
1299
  // Load register (literal): PC-relative load with a 19-bit signed
  // word offset (bits 23..5).  opc selects the operand size, V the
  // FP/SIMD form.
#define INSN(NAME, opc, V)                                              \
  void NAME(Register Rt, address dest) {                                \
    int64_t offset = (dest - pc()) >> 2;                                \
    starti;                                                             \
    f(opc, 31, 30), f(0b011, 29, 27), f(V, 26), f(0b00, 25, 24),        \
      sf(offset, 23, 5);                                                \
    rf(Rt, 0);                                                          \
  }                                                                     \
  /* As above, but also record an internal-word relocation for dest. */ \
  void NAME(Register Rt, address dest, relocInfo::relocType rtype) {    \
    InstructionMark im(this);                                           \
    guarantee(rtype == relocInfo::internal_word_type,                   \
              "only internal_word_type relocs make sense here");        \
    code_section()->relocate(inst_mark(), InternalAddress(dest).rspec()); \
    NAME(Rt, dest);                                                     \
  }                                                                     \
  void NAME(Register Rt, Label &L) {                                    \
    wrap_label(Rt, L, &Assembler::NAME);                                \
  }
1319
1320 INSN(ldrw, 0b00, 0);
1321 INSN(ldr, 0b01, 0);
1322 INSN(ldrsw, 0b10, 0);
1323
1324 #undef INSN
1325
  // Load FP/SIMD register (literal).  The FloatRegister is cast to
  // Register only to reuse rf()'s field encoder; the V bit (26)
  // distinguishes the FP/SIMD form from the integer one.
#define INSN(NAME, opc, V)                                              \
  void NAME(FloatRegister Rt, address dest) {                           \
    int64_t offset = (dest - pc()) >> 2;                                \
    starti;                                                             \
    f(opc, 31, 30), f(0b011, 29, 27), f(V, 26), f(0b00, 25, 24),        \
      sf(offset, 23, 5);                                                \
    rf((Register)Rt, 0);                                                \
  }

  INSN(ldrs, 0b00, 1);
  INSN(ldrd, 0b01, 1);
  INSN(ldrq, 0b10, 1);

#undef INSN
1340
  // Prefetch (literal): PRFM with a PC-relative target; `op` selects
  // the prefetch operation, defaulting to PLDL1KEEP.
#define INSN(NAME, opc, V)                                              \
  void NAME(address dest, prfop op = PLDL1KEEP) {                       \
    int64_t offset = (dest - pc()) >> 2;                                \
    starti;                                                             \
    f(opc, 31, 30), f(0b011, 29, 27), f(V, 26), f(0b00, 25, 24),        \
      sf(offset, 23, 5);                                                \
    f(op, 4, 0);                                                        \
  }                                                                     \
  void NAME(Label &L, prfop op = PLDL1KEEP) {                           \
    wrap_label(L, op, &Assembler::NAME);                                \
  }

  INSN(prfm, 0b11, 0);

#undef INSN
1356
1357 // Load/store
1358 void ld_st1(int opc, int p1, int V, int L,
1359 Register Rt1, Register Rt2, Address adr, bool no_allocate) {
1360 starti;
1361 f(opc, 31, 30), f(p1, 29, 27), f(V, 26), f(L, 22);
1362 zrf(Rt2, 10), zrf(Rt1, 0);
1363 if (no_allocate) {
1399 INSN(stpq, 0b10, 0b101, 1, 0, false);
1400 INSN(ldpq, 0b10, 0b101, 1, 1, false);
1401
1402 #undef INSN
1403
  // Load/store register (all modes)
  //
  // Emit one load/store of Rt using the addressing mode carried by adr.
  //   size : operand-size field (bits 31..30)
  //   op   : opcode field (bits 23..22)
  //   V    : set for FP/SIMD forms (bit 26)
  void ld_st2(Register Rt, const Address &adr, int size, int op, int V = 0) {
    starti;

    f(V, 26); // general reg?
    zrf(Rt, 0);

    // Encoding for literal loads is done here (rather than pushed
    // down into Address::encode) because the encoding of this
    // instruction is too different from all of the other forms to
    // make it worth sharing.
    if (adr.getMode() == Address::literal) {
      assert(size == 0b10 || size == 0b11, "bad operand size in ldr");
      assert(op == 0b01, "literal form can only be used with loads");
      f(size & 0b01, 31, 30), f(0b011, 29, 27), f(0b00, 25, 24);
      int64_t offset = (adr.target() - pc()) >> 2;
      sf(offset, 23, 5);
      code_section()->relocate(pc(), adr.rspec());
      return;
    }

    f(size, 31, 30);
    f(op, 23, 22); // str
    adr.encode(current);
  }
1429
  // Store register, one mnemonic per operand size; all forward to
  // ld_st2.  (The trailing backslash before the blank line below is
  // deliberate: the empty line terminates the macro definition.)
#define INSN(NAME, size, op)                                            \
  void NAME(Register Rt, const Address &adr) {                          \
    ld_st2(Rt, adr, size, op);                                          \
  }                                                                     \

  INSN(str, 0b11, 0b00);
  INSN(strw, 0b10, 0b00);
  INSN(strb, 0b00, 0b00);
  INSN(strh, 0b01, 0b00);
1439
1520 \
1521 /* These instructions have no immediate form. Provide an overload so \
1522 that if anyone does try to use an immediate operand -- this has \
1523 happened! -- we'll get a compile-time error. */ \
1524 void NAME(Register Rd, Register Rn, unsigned imm, \
1525 enum shift_kind kind = LSL, unsigned shift = 0) { \
1526 assert(false, " can't be used with immediate operand"); \
1527 }
1528
1529 INSN(bic, 1, 0b00, 1);
1530 INSN(orn, 1, 0b01, 1);
1531 INSN(eon, 1, 0b10, 1);
1532 INSN(bics, 1, 0b11, 1);
1533 INSN(bicw, 0, 0b00, 1);
1534 INSN(ornw, 0, 0b01, 1);
1535 INSN(eonw, 0, 0b10, 1);
1536 INSN(bicsw, 0, 0b11, 1);
1537
1538 #undef INSN
1539
1540 #ifdef _WIN64
1541 // In MSVC, `mvn` is defined as a macro and it screws up compilation
1542 #undef mvn
1543 #endif
1544
  // Aliases for short forms of orn
  // mvn Rd, Rm{, shift}  ==  orn Rd, zr, Rm{, shift}   (64-bit)
  void mvn(Register Rd, Register Rm,
           enum shift_kind kind = LSL, unsigned shift = 0) {
    orn(Rd, zr, Rm, kind, shift);
  }
1550
  // mvnw Rd, Rm{, shift}  ==  ornw Rd, zr, Rm{, shift}  (32-bit)
  void mvnw(Register Rd, Register Rm,
            enum shift_kind kind = LSL, unsigned shift = 0) {
    ornw(Rd, zr, Rm, kind, shift);
  }
1555
1556 // Add/subtract (shifted register)
1557 #define INSN(NAME, size, op) \
1558 void NAME(Register Rd, Register Rn, Register Rm, \
1559 enum shift_kind kind, unsigned shift = 0) { \
1560 starti; \
1561 f(0, 21); \
1562 assert_cond(kind != ROR); \
1563 guarantee(size == 1 || shift < 32, "incorrect shift");\
1564 zrf(Rd, 0), zrf(Rn, 5), zrf(Rm, 16); \
2679 assert((T == T8B && index <= 0b0111) || (T == T16B && index <= 0b1111), "Invalid index value");
2680 f(0, 31), f((int)T & 1, 30), f(0b101110000, 29, 21);
2681 rf(Vm, 16), f(0, 15), f(index, 14, 11);
2682 f(0, 10), rf(Vn, 5), rf(Vd, 0);
2683 }
2684
  // Construct an assembler that emits into the given CodeBuffer.
  Assembler(CodeBuffer* code) : AbstractAssembler(code) {
  }
2687
  // Not supported on this port: always trips ShouldNotCallThis().
  // The return statement exists only to satisfy the signature.
  virtual RegisterOrConstant delayed_value_impl(intptr_t* delayed_value_addr,
                                                Register tmp,
                                                int offset) {
    ShouldNotCallThis();
    return RegisterOrConstant();
  }
2694
2695 // Stack overflow checking
2696 virtual void bang_stack_with_offset(int offset);
2697
2698 static bool operand_valid_for_logical_immediate(bool is32, uint64_t imm);
2699 static bool operand_valid_for_add_sub_immediate(int64_t imm);
2700 static bool operand_valid_for_float_immediate(double imm);
2701
2702 void emit_data64(jlong data, relocInfo::relocType rtype, int format = 0);
2703 void emit_data64(jlong data, RelocationHolder const& rspec, int format = 0);
2704 };
2705
2706 inline Assembler::Membar_mask_bits operator|(Assembler::Membar_mask_bits a,
2707 Assembler::Membar_mask_bits b) {
2708 return Assembler::Membar_mask_bits(unsigned(a)|unsigned(b));
2709 }
2710
// An in-flight instruction emits itself into the assembler's code
// stream when it goes out of scope.
Instruction_aarch64::~Instruction_aarch64() {
  assem->emit();
}
2714
2715 #undef starti
2716
2717 // Invert a condition
2718 inline const Assembler::Condition operator~(const Assembler::Condition cond) {
2719 return Assembler::Condition(int(cond) ^ 1);
|