182
183 u = val << (31 - hi);
184 n = n >> (31 - hi + lo);
185 return n;
186 }
187
188 static inline uint32_t extract(uint32_t val, int msb, int lsb) {
189 int nbits = msb - lsb + 1;
190 assert_cond(msb >= lsb);
191 uint32_t mask = (1U << nbits) - 1;
192 uint32_t result = val >> lsb;
193 result &= mask;
194 return result;
195 }
196
// Extract bits [msb:lsb] of val and sign-extend the result from its
// top bit (bit msb - lsb) to a full int32_t.  Relies on extend()
// above, which presumably sign-extends via an arithmetic right shift
// -- confirm against its full definition.
197 static inline int32_t sextract(uint32_t val, int msb, int lsb) {
198 uint32_t uval = extract(val, msb, lsb);
199 return extend(uval, msb - lsb);
200 }
201
202 static void patch(address a, int msb, int lsb, unsigned long val) {
203 int nbits = msb - lsb + 1;
204 guarantee(val < (1U << nbits), "Field too big for insn");
205 assert_cond(msb >= lsb);
206 unsigned mask = (1U << nbits) - 1;
207 val <<= lsb;
208 mask <<= lsb;
209 unsigned target = *(unsigned *)a;
210 target &= ~mask;
211 target |= val;
212 *(unsigned *)a = target;
213 }
214
215 static void spatch(address a, int msb, int lsb, long val) {
216 int nbits = msb - lsb + 1;
217 long chk = val >> (nbits - 1);
218 guarantee (chk == -1 || chk == 0, "Field too big for insn");
219 unsigned uval = val;
220 unsigned mask = (1U << nbits) - 1;
221 uval &= mask;
222 uval <<= lsb;
223 mask <<= lsb;
224 unsigned target = *(unsigned *)a;
225 target &= ~mask;
226 target |= uval;
227 *(unsigned *)a = target;
228 }
229
230 void f(unsigned val, int msb, int lsb) {
231 int nbits = msb - lsb + 1;
232 guarantee(val < (1U << nbits), "Field too big for insn");
233 assert_cond(msb >= lsb);
234 unsigned mask = (1U << nbits) - 1;
235 val <<= lsb;
236 mask <<= lsb;
237 insn |= val;
238 assert_cond((bits & mask) == 0);
239 #ifdef ASSERT
240 bits |= mask;
241 #endif
242 }
243
// Set the single bit `bit` of the current instruction to val.
244 void f(unsigned val, int bit) {
245 f(val, bit, bit);
246 }
247
// Insert a signed value into field [msb:lsb]: verify val fits in
// msb - lsb + 1 bits as two's complement, then insert its low bits.
248 void sf(long val, int msb, int lsb) {
249 int nbits = msb - lsb + 1;
// chk is all-ones or all-zeroes iff the bits discarded below are a
// plain sign extension of the field.
250 long chk = val >> (nbits - 1);
251 guarantee (chk == -1 || chk == 0, "Field too big for insn");
252 unsigned uval = val;
253 unsigned mask = (1U << nbits) - 1;
254 uval &= mask;
255 f(uval, lsb + nbits - 1, lsb);
256 }
257
// Insert the 5-bit encoding of general register r at bit position lsb.
258 void rf(Register r, int lsb) {
259 f(r->encoding_nocheck(), lsb + 4, lsb);
260 }
261
262 // reg|ZR
// Insert r's encoding where encoding 31 means the zero register.
// NOTE(review): relies on zr's encoding_nocheck() being one above the
// last normal encoding so that subtracting (r == zr) yields 31 --
// confirm against the register definitions.
263 void zrf(Register r, int lsb) {
264 f(r->encoding_nocheck() - (r == zr), lsb + 4, lsb);
265 }
266
267 // reg|SP
// Insert r's encoding where encoding 31 means the stack pointer.
268 void srf(Register r, int lsb) {
269 f(r == sp ? 31 : r->encoding_nocheck(), lsb + 4, lsb);
270 }
// Convenience subclasses of extend: each fixes the 3-bit AArch64
// extend "option" field and the matching ext kind, leaving only the
// shift amount (default -1, presumably meaning "unspecified") to the
// caller.
340 class uxtw : public extend {
341 public:
342 uxtw(int shift = -1): extend(shift, 0b010, ext::uxtw) { }
343 };
// LSL is encoded as UXTX (option 0b011) in the AArch64 ISA.
344 class lsl : public extend {
345 public:
346 lsl(int shift = -1): extend(shift, 0b011, ext::uxtx) { }
347 };
348 class sxtw : public extend {
349 public:
350 sxtw(int shift = -1): extend(shift, 0b110, ext::sxtw) { }
351 };
352 class sxtx : public extend {
353 public:
354 sxtx(int shift = -1): extend(shift, 0b111, ext::sxtx) { }
355 };
356
357 private:
358 Register _base;
359 Register _index;
360 long _offset;
361 enum mode _mode;
362 extend _ext;
363
364 RelocationHolder _rspec;
365
366   // Typically, when we use AddressLiterals, we want their rvalue.
367   // However, in some situations we want the lvalue (effective
368   // address) of the item. We provide a special factory for making those lvals.
369 bool _is_lval;
370
371 // If the target is far we'll need to load the ea of this to a
372 // register to reach it. Otherwise if near we can do PC-relative
373 // addressing.
374 address _target;
375
376 public:
// Constructors for the various addressing modes.  _mode selects which
// of the other fields are meaningful.
// Default: no_mode.  NOTE(review): all other members are left
// uninitialized here -- accessors must not be called on a
// default-constructed Address.
377 Address()
378 : _mode(no_mode) { }
// [base] with zero offset.
379 Address(Register r)
380 : _base(r), _index(noreg), _offset(0), _mode(base_plus_offset), _target(0) { }
// [base, #imm] -- overloads for the common integer offset types.
381 Address(Register r, int o)
382 : _base(r), _index(noreg), _offset(o), _mode(base_plus_offset), _target(0) { }
383 Address(Register r, long o)
384 : _base(r), _index(noreg), _offset(o), _mode(base_plus_offset), _target(0) { }
385 Address(Register r, unsigned long o)
386 : _base(r), _index(noreg), _offset(o), _mode(base_plus_offset), _target(0) { }
387 #ifdef ASSERT
// ByteSize overload only exists in ASSERT builds, where ByteSize is a
// distinct type rather than a plain integer.
388 Address(Register r, ByteSize disp)
389 : _base(r), _index(noreg), _offset(in_bytes(disp)), _mode(base_plus_offset), _target(0) { }
390 #endif
// [base, index {, extend}] register-offset form.
391 Address(Register r, Register r1, extend ext = lsl())
392 : _base(r), _index(r1), _offset(0), _mode(base_plus_offset_reg),
393 _ext(ext), _target(0) { }
// Pre-indexed: [base, #imm]!
// NOTE(review): unlike its siblings this leaves _index and _target
// uninitialized -- confirm that nothing reads them in `pre` mode.
394 Address(Pre p)
395 : _base(p.reg()), _offset(p.offset()), _mode(pre) { }
// Post-indexed, immediate or register increment.
396 Address(Post p)
397 : _base(p.reg()), _index(p.idx_reg()), _offset(p.offset()),
398 _mode(p.is_postreg() ? post_reg : post), _target(0) { }
// PC-relative literal with an explicit relocation.
399 Address(address target, RelocationHolder const& rspec)
400 : _mode(literal),
401 _rspec(rspec),
402 _is_lval(false),
403 _target(target) { }
404 Address(address target, relocInfo::relocType rtype = relocInfo::external_word_type);
// Register-or-constant index: folds a constant index into an
// immediate offset (scaled by the extend shift), otherwise behaves
// like the register-offset form.
405 Address(Register base, RegisterOrConstant index, extend ext = lsl())
406 : _base (base),
407 _offset(0), _ext(ext), _target(0) {
408 if (index.is_register()) {
409 _mode = base_plus_offset_reg;
410 _index = index.as_register();
411 } else {
// Only a plain (uxtx) extend makes sense for a constant index.
412 guarantee(ext.option() == ext::uxtx, "should be");
413 assert(index.is_constant(), "should be");
414 _mode = base_plus_offset;
415 _offset = index.as_constant() << ext.shift();
416 }
417 }
418
419 Register base() const {
420 guarantee((_mode == base_plus_offset | _mode == base_plus_offset_reg
421 | _mode == post | _mode == post_reg),
422 "wrong mode");
423 return _base;
424 }
// Immediate offset (base_plus_offset / pre / post modes).
425 long offset() const {
426 return _offset;
427 }
// Index register; only meaningful for the *_reg modes.
428 Register index() const {
429 return _index;
430 }
431 mode getMode() const {
432 return _mode;
433 }
// True if reg is used as either base or index of this address.
434 bool uses(Register reg) const { return _base == reg || _index == reg; }
// Literal target address (literal mode only).
435 address target() const { return _target; }
436 const RelocationHolder& rspec() const { return _rspec; }
437
438 void encode(Instruction_aarch64 *i) const {
439 i->f(0b111, 29, 27);
440 i->srf(_base, 5);
441
442 switch(_mode) {
443 case base_plus_offset:
444 {
445 unsigned size = i->get(31, 30);
537 size = 4 << size;
538 guarantee(_offset % size == 0, "bad offset");
539 i->sf(_offset / size, 21, 15);
540 i->srf(_base, 5);
541 }
542
543 void encode_nontemporal_pair(Instruction_aarch64 *i) const {
544 // Only base + offset is allowed
545 i->f(0b000, 25, 23);
546 unsigned size = i->get(31, 31);
547 size = 4 << size;
548 guarantee(_offset % size == 0, "bad offset");
549 i->sf(_offset / size, 21, 15);
550 i->srf(_base, 5);
551 guarantee(_mode == Address::base_plus_offset,
552 "Bad addressing mode for non-temporal op");
553 }
554
555 void lea(MacroAssembler *, Register) const;
556
557 static bool offset_ok_for_immed(long offset, int shift) {
558 unsigned mask = (1 << shift) - 1;
559 if (offset < 0 || offset & mask) {
560 return (uabs(offset) < (1 << (20 - 12))); // Unscaled offset
561 } else {
562 return ((offset >> shift) < (1 << (21 - 10 + 1))); // Scaled, unsigned offset
563 }
564 }
565 };
566
567 // Convenience classes
// An Address that targets runtime (VM) code, carrying a
// runtime_call_type relocation.
568 class RuntimeAddress: public Address {
569
570 public:
571
572 RuntimeAddress(address target) : Address(target, relocInfo::runtime_call_type) {}
573
574 };
575
576 class OopAddress: public Address {
577
599
// An Address pointing back into the current code blob, carrying an
// internal_word_type relocation.
600 class InternalAddress: public Address {
601
602 public:
603
604 InternalAddress(address target) : Address(target, relocInfo::internal_word_type) {}
605 };
606
607 const int FPUStateSizeInWords = FloatRegisterImpl::number_of_registers *
608 FloatRegisterImpl::save_slots_per_register;
609
// AArch64 PRFM prefetch operation encodings.  The bit groups select
// prefetch kind (load / store / instruction), target cache level
// (L1..L3) and policy (KEEP vs STRM) -- see the <prfop> operand in
// the Arm ARM.
610 typedef enum {
611 PLDL1KEEP = 0b00000, PLDL1STRM, PLDL2KEEP, PLDL2STRM, PLDL3KEEP, PLDL3STRM,
612 PSTL1KEEP = 0b10000, PSTL1STRM, PSTL2KEEP, PSTL2STRM, PSTL3KEEP, PSTL3STRM,
613 PLIL1KEEP = 0b01000, PLIL1STRM, PLIL2KEEP, PLIL2STRM, PLIL3KEEP, PLIL3STRM
614 } prfop;
615
616 class Assembler : public AbstractAssembler {
617
618 #ifndef PRODUCT
// asm_bp: a breakpoint pc for debugging the assembler itself.  When
// the next emit happens at that address a nop instruction is executed
// so a native debugger breakpoint placed on it will fire.
619 static const unsigned long asm_bp;
620
621 void emit_long(jint x) {
622 if ((unsigned long)pc() == asm_bp)
623 asm volatile ("nop");
624 AbstractAssembler::emit_int32(x);
625 }
626 #else
// Product builds: plain 32-bit emit with no breakpoint support.
627 void emit_long(jint x) {
628 AbstractAssembler::emit_int32(x);
629 }
630 #endif
631
632 public:
633
634 enum { instruction_size = 4 };
635
636 //---< calculate length of instruction >---
637 // We just use the values set above.
638 // instruction must start at passed address
639 static unsigned int instr_len(unsigned char *instr) { return instruction_size; }
640
641 //---< longest instructions >---
642 static unsigned int instr_maxlen() { return instruction_size; }
653 }
654
// Post-indexed address with an immediate: access [base], then
// base += offset.
655 Address post(Register base, int offset) {
656 return adjust(base, offset, false);
657 }
658
// Post-indexed address with a register increment.
659 Address post(Register base, Register idx) {
660 return Address(Post(base, idx));
661 }
662
// The instruction currently under construction; the field-insertion
// helpers below all forward to it.
663 Instruction_aarch64* current;
664
665 void set_current(Instruction_aarch64* i) { current = i; }
666
// Unsigned field [msb:lsb].
667 void f(unsigned val, int msb, int lsb) {
668 current->f(val, msb, lsb);
669 }
// Single bit.
670 void f(unsigned val, int msb) {
671 current->f(val, msb, msb);
672 }
// Signed field.
673 void sf(long val, int msb, int lsb) {
674 current->sf(val, msb, lsb);
675 }
// Register fields: plain, reg|SP, reg|ZR.
676 void rf(Register reg, int lsb) {
677 current->rf(reg, lsb);
678 }
679 void srf(Register reg, int lsb) {
680 current->srf(reg, lsb);
681 }
682 void zrf(Register reg, int lsb) {
683 current->zrf(reg, lsb);
684 }
685 void rf(FloatRegister reg, int lsb) {
686 current->rf(reg, lsb);
687 }
// Fixed (opcode) bits with their mask.
688 void fixed(unsigned value, unsigned mask) {
689 current->fixed(value, mask);
690 }
691
692 void emit() {
693 emit_long(current->get_insn());
703 void wrap_label(Label &L, uncond_branch_insn insn);
704 void wrap_label(Register r, Label &L, compare_and_branch_insn insn);
705 void wrap_label(Register r, int bitpos, Label &L, test_and_branch_insn insn);
706 void wrap_label(Label &L, prfop, prefetch_insn insn);
707
708 // PC-rel. addressing
709
710 void adr(Register Rd, address dest);
711 void _adrp(Register Rd, address dest);
712
713 void adr(Register Rd, const Address &dest);
714 void _adrp(Register Rd, const Address &dest);
715
716 void adr(Register Rd, Label &L) {
717 wrap_label(Rd, L, &Assembler::Assembler::adr);
718 }
719 void _adrp(Register Rd, Label &L) {
720 wrap_label(Rd, L, &Assembler::_adrp);
721 }
722
723 void adrp(Register Rd, const Address &dest, unsigned long &offset);
724
725 #undef INSN
726
727 void add_sub_immediate(Register Rd, Register Rn, unsigned uimm, int op,
728 int negated_op);
729
730 // Add/subtract (immediate)
731 #define INSN(NAME, decode, negated) \
732 void NAME(Register Rd, Register Rn, unsigned imm, unsigned shift) { \
733 starti; \
734 f(decode, 31, 29), f(0b10001, 28, 24), f(shift, 23, 22), f(imm, 21, 10); \
735 zrf(Rd, 0), srf(Rn, 5); \
736 } \
737 \
738 void NAME(Register Rd, Register Rn, unsigned imm) { \
739 starti; \
740 add_sub_immediate(Rd, Rn, imm, decode, negated); \
741 }
742
743 INSN(addsw, 0b001, 0b011);
829 #undef INSN
830
831 // Extract
832 #define INSN(NAME, opcode, size) \
833 void NAME(Register Rd, Register Rn, Register Rm, unsigned imms) { \
834 starti; \
835 guarantee(size == 1 || imms < 32, "incorrect imms"); \
836 f(opcode, 31, 21), f(imms, 15, 10); \
837 zrf(Rm, 16), zrf(Rn, 5), zrf(Rd, 0); \
838 }
839
840 INSN(extrw, 0b00010011100, 0);
841 INSN(extr, 0b10010011110, 1);
842
843 #undef INSN
844
845 // The maximum range of a branch is fixed for the AArch64
846 // architecture. In debug mode we shrink it in order to test
847 // trampolines, but not so small that branches in the interpreter
848 // are out of range.
849 static const unsigned long branch_range = NOT_DEBUG(128 * M) DEBUG_ONLY(2 * M);
850
// True if a direct branch at `branch` can reach `target` within the
// architectural (or DEBUG-shrunk) branch_range.
851 static bool reachable_from_branch_at(address branch, address target) {
852 return uabs(target - branch) < branch_range;
853 }
854
855 // Unconditional branch (immediate)
856 #define INSN(NAME, opcode) \
857 void NAME(address dest) { \
858 starti; \
859 long offset = (dest - pc()) >> 2; \
860 DEBUG_ONLY(assert(reachable_from_branch_at(pc(), dest), "debug only")); \
861 f(opcode, 31), f(0b00101, 30, 26), sf(offset, 25, 0); \
862 } \
863 void NAME(Label &L) { \
864 wrap_label(L, &Assembler::NAME); \
865 } \
866 void NAME(const Address &dest);
867
868 INSN(b, 0);
869 INSN(bl, 1);
870
871 #undef INSN
872
873 // Compare & branch (immediate)
874 #define INSN(NAME, opcode) \
875 void NAME(Register Rt, address dest) { \
876 long offset = (dest - pc()) >> 2; \
877 starti; \
878 f(opcode, 31, 24), sf(offset, 23, 5), rf(Rt, 0); \
879 } \
880 void NAME(Register Rt, Label &L) { \
881 wrap_label(Rt, L, &Assembler::NAME); \
882 }
883
884 INSN(cbzw, 0b00110100);
885 INSN(cbnzw, 0b00110101);
886 INSN(cbz, 0b10110100);
887 INSN(cbnz, 0b10110101);
888
889 #undef INSN
890
891 // Test & branch (immediate)
892 #define INSN(NAME, opcode) \
893 void NAME(Register Rt, int bitpos, address dest) { \
894 long offset = (dest - pc()) >> 2; \
895 int b5 = bitpos >> 5; \
896 bitpos &= 0x1f; \
897 starti; \
898 f(b5, 31), f(opcode, 30, 24), f(bitpos, 23, 19), sf(offset, 18, 5); \
899 rf(Rt, 0); \
900 } \
901 void NAME(Register Rt, int bitpos, Label &L) { \
902 wrap_label(Rt, bitpos, L, &Assembler::NAME); \
903 }
904
905 INSN(tbz, 0b0110110);
906 INSN(tbnz, 0b0110111);
907
908 #undef INSN
909
910 // Conditional branch (immediate)
911 enum Condition
912 {EQ, NE, HS, CS=HS, LO, CC=LO, MI, PL, VS, VC, HI, LS, GE, LT, GT, LE, AL, NV};
913
// Conditional branch (B.cond) to dest: 19-bit signed word offset,
// pc-relative, in bits [23:5]; condition in bits [3:0].
914 void br(Condition cond, address dest) {
915 long offset = (dest - pc()) >> 2;
916 starti;
917 f(0b0101010, 31, 25), f(0, 24), sf(offset, 23, 5), f(0, 4), f(cond, 3, 0);
918 }
919
920 #define INSN(NAME, cond) \
921 void NAME(address dest) { \
922 br(cond, dest); \
923 }
924
925 INSN(beq, EQ);
926 INSN(bne, NE);
927 INSN(bhs, HS);
928 INSN(bcs, CS);
929 INSN(blo, LO);
930 INSN(bcc, CC);
931 INSN(bmi, MI);
932 INSN(bpl, PL);
933 INSN(bvs, VS);
934 INSN(bvc, VC);
935 INSN(bhi, HI);
1275 void NAME_L(operand_size sz, Register Rs, Register Rt, Register Rn) { \
1276 lse_atomic(Rs, Rt, Rn, sz, op1, op2, false, true); \
1277 } \
1278 void NAME_AL(operand_size sz, Register Rs, Register Rt, Register Rn) {\
1279 lse_atomic(Rs, Rt, Rn, sz, op1, op2, true, true); \
1280 }
1281 INSN(ldadd, ldadda, ldaddl, ldaddal, 0, 0b000);
1282 INSN(ldbic, ldbica, ldbicl, ldbical, 0, 0b001);
1283 INSN(ldeor, ldeora, ldeorl, ldeoral, 0, 0b010);
1284 INSN(ldorr, ldorra, ldorrl, ldorral, 0, 0b011);
1285 INSN(ldsmax, ldsmaxa, ldsmaxl, ldsmaxal, 0, 0b100);
1286 INSN(ldsmin, ldsmina, ldsminl, ldsminal, 0, 0b101);
1287 INSN(ldumax, ldumaxa, ldumaxl, ldumaxal, 0, 0b110);
1288 INSN(ldumin, ldumina, lduminl, lduminal, 0, 0b111);
1289 INSN(swp, swpa, swpl, swpal, 1, 0b000);
1290 #undef INSN
1291
1292 // Load register (literal)
1293 #define INSN(NAME, opc, V) \
1294 void NAME(Register Rt, address dest) { \
1295 long offset = (dest - pc()) >> 2; \
1296 starti; \
1297 f(opc, 31, 30), f(0b011, 29, 27), f(V, 26), f(0b00, 25, 24), \
1298 sf(offset, 23, 5); \
1299 rf(Rt, 0); \
1300 } \
1301 void NAME(Register Rt, address dest, relocInfo::relocType rtype) { \
1302 InstructionMark im(this); \
1303 guarantee(rtype == relocInfo::internal_word_type, \
1304 "only internal_word_type relocs make sense here"); \
1305 code_section()->relocate(inst_mark(), InternalAddress(dest).rspec()); \
1306 NAME(Rt, dest); \
1307 } \
1308 void NAME(Register Rt, Label &L) { \
1309 wrap_label(Rt, L, &Assembler::NAME); \
1310 }
1311
1312 INSN(ldrw, 0b00, 0);
1313 INSN(ldr, 0b01, 0);
1314 INSN(ldrsw, 0b10, 0);
1315
1316 #undef INSN
1317
1318 #define INSN(NAME, opc, V) \
1319 void NAME(FloatRegister Rt, address dest) { \
1320 long offset = (dest - pc()) >> 2; \
1321 starti; \
1322 f(opc, 31, 30), f(0b011, 29, 27), f(V, 26), f(0b00, 25, 24), \
1323 sf(offset, 23, 5); \
1324 rf((Register)Rt, 0); \
1325 }
1326
1327 INSN(ldrs, 0b00, 1);
1328 INSN(ldrd, 0b01, 1);
1329 INSN(ldrq, 0b10, 1);
1330
1331 #undef INSN
1332
1333 #define INSN(NAME, opc, V) \
1334 void NAME(address dest, prfop op = PLDL1KEEP) { \
1335 long offset = (dest - pc()) >> 2; \
1336 starti; \
1337 f(opc, 31, 30), f(0b011, 29, 27), f(V, 26), f(0b00, 25, 24), \
1338 sf(offset, 23, 5); \
1339 f(op, 4, 0); \
1340 } \
1341 void NAME(Label &L, prfop op = PLDL1KEEP) { \
1342 wrap_label(L, op, &Assembler::NAME); \
1343 }
1344
1345 INSN(prfm, 0b11, 0);
1346
1347 #undef INSN
1348
1349 // Load/store
1350 void ld_st1(int opc, int p1, int V, int L,
1351 Register Rt1, Register Rt2, Address adr, bool no_allocate) {
1352 starti;
1353 f(opc, 31, 30), f(p1, 29, 27), f(V, 26), f(L, 22);
1354 zrf(Rt2, 10), zrf(Rt1, 0);
1355 if (no_allocate) {
1391 INSN(stpq, 0b10, 0b101, 1, 0, false);
1392 INSN(ldpq, 0b10, 0b101, 1, 1, false);
1393
1394 #undef INSN
1395
1396 // Load/store register (all modes)
// Common encoder for single-register loads/stores in all addressing
// modes.  size/op are the instruction's size and opc fields; V
// selects the SIMD&FP register file.
1397 void ld_st2(Register Rt, const Address &adr, int size, int op, int V = 0) {
1398 starti;
1399
1400 f(V, 26); // general reg?
1401 zrf(Rt, 0);
1402
1403 // Encoding for literal loads is done here (rather than pushed
1404 // down into Address::encode) because the encoding of this
1405 // instruction is too different from all of the other forms to
1406 // make it worth sharing.
1407 if (adr.getMode() == Address::literal) {
1408 assert(size == 0b10 || size == 0b11, "bad operand size in ldr");
1409 assert(op == 0b01, "literal form can only be used with loads");
1410 f(size & 0b01, 31, 30), f(0b011, 29, 27), f(0b00, 25, 24);
// 19-bit word offset to the literal pool entry.
1411 long offset = (adr.target() - pc()) >> 2;
1412 sf(offset, 23, 5);
1413 code_section()->relocate(pc(), adr.rspec());
1414 return;
1415 }
1416
// Non-literal forms: size/opc here, the rest via Address::encode.
1417 f(size, 31, 30);
1418 f(op, 23, 22); // str
1419 adr.encode(current);
1420 }
1421
1422 #define INSN(NAME, size, op) \
1423 void NAME(Register Rt, const Address &adr) { \
1424 ld_st2(Rt, adr, size, op); \
1425 } \
1426
1427 INSN(str, 0b11, 0b00);
1428 INSN(strw, 0b10, 0b00);
1429 INSN(strb, 0b00, 0b00);
1430 INSN(strh, 0b01, 0b00);
1431
2666 assert((T == T8B && index <= 0b0111) || (T == T16B && index <= 0b1111), "Invalid index value");
2667 f(0, 31), f((int)T & 1, 30), f(0b101110000, 29, 21);
2668 rf(Vm, 16), f(0, 15), f(index, 14, 11);
2669 f(0, 10), rf(Vn, 5), rf(Vd, 0);
2670 }
2671
2672 Assembler(CodeBuffer* code) : AbstractAssembler(code) {
2673 }
2674
2675 virtual RegisterOrConstant delayed_value_impl(intptr_t* delayed_value_addr,
2676 Register tmp,
2677 int offset) {
2678 ShouldNotCallThis();
2679 return RegisterOrConstant();
2680 }
2681
2682 // Stack overflow checking
2683 virtual void bang_stack_with_offset(int offset);
2684
2685 static bool operand_valid_for_logical_immediate(bool is32, uint64_t imm);
2686 static bool operand_valid_for_add_sub_immediate(long imm);
2687 static bool operand_valid_for_float_immediate(double imm);
2688
2689 void emit_data64(jlong data, relocInfo::relocType rtype, int format = 0);
2690 void emit_data64(jlong data, RelocationHolder const& rspec, int format = 0);
2691 };
2692
// Allow membar mask bits to be or-ed together; plain enums do not
// provide operator| themselves.
2693 inline Assembler::Membar_mask_bits operator|(Assembler::Membar_mask_bits a,
2694 Assembler::Membar_mask_bits b) {
2695 return Assembler::Membar_mask_bits(unsigned(a)|unsigned(b));
2696 }
2697
// The instruction is emitted when the Instruction_aarch64 object goes
// out of scope (the `starti` idiom): the destructor hands the
// completed word to the assembler.
2698 Instruction_aarch64::~Instruction_aarch64() {
2699 assem->emit();
2700 }
2701
2702 #undef starti
2703
2704 // Invert a condition
2705 inline const Assembler::Condition operator~(const Assembler::Condition cond) {
2706 return Assembler::Condition(int(cond) ^ 1);
|
182
183 u = val << (31 - hi);
184 n = n >> (31 - hi + lo);
185 return n;
186 }
187
188 static inline uint32_t extract(uint32_t val, int msb, int lsb) {
189 int nbits = msb - lsb + 1;
190 assert_cond(msb >= lsb);
191 uint32_t mask = (1U << nbits) - 1;
192 uint32_t result = val >> lsb;
193 result &= mask;
194 return result;
195 }
196
197 static inline int32_t sextract(uint32_t val, int msb, int lsb) {
198 uint32_t uval = extract(val, msb, lsb);
199 return extend(uval, msb - lsb);
200 }
201
202 static void patch(address a, int msb, int lsb, uint64_t val) {
203 int nbits = msb - lsb + 1;
204 guarantee(val < (1U << nbits), "Field too big for insn");
205 assert_cond(msb >= lsb);
206 unsigned mask = (1U << nbits) - 1;
207 val <<= lsb;
208 mask <<= lsb;
209 unsigned target = *(unsigned *)a;
210 target &= ~mask;
211 target |= val;
212 *(unsigned *)a = target;
213 }
214
215 static void spatch(address a, int msb, int lsb, int64_t val) {
216 int nbits = msb - lsb + 1;
217 int64_t chk = val >> (nbits - 1);
218 guarantee (chk == -1 || chk == 0, "Field too big for insn");
219 unsigned uval = val;
220 unsigned mask = (1U << nbits) - 1;
221 uval &= mask;
222 uval <<= lsb;
223 mask <<= lsb;
224 unsigned target = *(unsigned *)a;
225 target &= ~mask;
226 target |= uval;
227 *(unsigned *)a = target;
228 }
229
230 void f(unsigned val, int msb, int lsb) {
231 int nbits = msb - lsb + 1;
232 guarantee(val < (1U << nbits), "Field too big for insn");
233 assert_cond(msb >= lsb);
234 unsigned mask = (1U << nbits) - 1;
235 val <<= lsb;
236 mask <<= lsb;
237 insn |= val;
238 assert_cond((bits & mask) == 0);
239 #ifdef ASSERT
240 bits |= mask;
241 #endif
242 }
243
244 void f(unsigned val, int bit) {
245 f(val, bit, bit);
246 }
247
248 void sf(int64_t val, int msb, int lsb) {
249 int nbits = msb - lsb + 1;
250 int64_t chk = val >> (nbits - 1);
251 guarantee (chk == -1 || chk == 0, "Field too big for insn");
252 unsigned uval = val;
253 unsigned mask = (1U << nbits) - 1;
254 uval &= mask;
255 f(uval, lsb + nbits - 1, lsb);
256 }
257
258 void rf(Register r, int lsb) {
259 f(r->encoding_nocheck(), lsb + 4, lsb);
260 }
261
262 // reg|ZR
263 void zrf(Register r, int lsb) {
264 f(r->encoding_nocheck() - (r == zr), lsb + 4, lsb);
265 }
266
267 // reg|SP
268 void srf(Register r, int lsb) {
269 f(r == sp ? 31 : r->encoding_nocheck(), lsb + 4, lsb);
270 }
340 class uxtw : public extend {
341 public:
342 uxtw(int shift = -1): extend(shift, 0b010, ext::uxtw) { }
343 };
344 class lsl : public extend {
345 public:
346 lsl(int shift = -1): extend(shift, 0b011, ext::uxtx) { }
347 };
348 class sxtw : public extend {
349 public:
350 sxtw(int shift = -1): extend(shift, 0b110, ext::sxtw) { }
351 };
352 class sxtx : public extend {
353 public:
354 sxtx(int shift = -1): extend(shift, 0b111, ext::sxtx) { }
355 };
356
357 private:
358 Register _base;
359 Register _index;
360 int64_t _offset;
361 enum mode _mode;
362 extend _ext;
363
364 RelocationHolder _rspec;
365
366   // Typically, when we use AddressLiterals, we want their rvalue.
367   // However, in some situations we want the lvalue (effective
368   // address) of the item. We provide a special factory for making those lvals.
369 bool _is_lval;
370
371 // If the target is far we'll need to load the ea of this to a
372 // register to reach it. Otherwise if near we can do PC-relative
373 // addressing.
374 address _target;
375
376 public:
377 Address()
378 : _mode(no_mode) { }
379 Address(Register r)
380 : _base(r), _index(noreg), _offset(0), _mode(base_plus_offset), _target(0) { }
381 Address(Register r, int o)
382 : _base(r), _index(noreg), _offset(o), _mode(base_plus_offset), _target(0) { }
383 Address(Register r, int64_t o)
384 : _base(r), _index(noreg), _offset(o), _mode(base_plus_offset), _target(0) { }
385 Address(Register r, uint64_t o)
386 : _base(r), _index(noreg), _offset(o), _mode(base_plus_offset), _target(0) { }
387 #ifdef ASSERT
388 Address(Register r, ByteSize disp)
389 : _base(r), _index(noreg), _offset(in_bytes(disp)), _mode(base_plus_offset), _target(0) { }
390 #endif
391 Address(Register r, Register r1, extend ext = lsl())
392 : _base(r), _index(r1), _offset(0), _mode(base_plus_offset_reg),
393 _ext(ext), _target(0) { }
394 Address(Pre p)
395 : _base(p.reg()), _offset(p.offset()), _mode(pre) { }
396 Address(Post p)
397 : _base(p.reg()), _index(p.idx_reg()), _offset(p.offset()),
398 _mode(p.is_postreg() ? post_reg : post), _target(0) { }
399 Address(address target, RelocationHolder const& rspec)
400 : _mode(literal),
401 _rspec(rspec),
402 _is_lval(false),
403 _target(target) { }
404 Address(address target, relocInfo::relocType rtype = relocInfo::external_word_type);
405 Address(Register base, RegisterOrConstant index, extend ext = lsl())
406 : _base (base),
407 _offset(0), _ext(ext), _target(0) {
408 if (index.is_register()) {
409 _mode = base_plus_offset_reg;
410 _index = index.as_register();
411 } else {
412 guarantee(ext.option() == ext::uxtx, "should be");
413 assert(index.is_constant(), "should be");
414 _mode = base_plus_offset;
415 _offset = index.as_constant() << ext.shift();
416 }
417 }
418
419 Register base() const {
420 guarantee((_mode == base_plus_offset | _mode == base_plus_offset_reg
421 | _mode == post | _mode == post_reg),
422 "wrong mode");
423 return _base;
424 }
425 int64_t offset() const {
426 return _offset;
427 }
428 Register index() const {
429 return _index;
430 }
431 mode getMode() const {
432 return _mode;
433 }
434 bool uses(Register reg) const { return _base == reg || _index == reg; }
435 address target() const { return _target; }
436 const RelocationHolder& rspec() const { return _rspec; }
437
438 void encode(Instruction_aarch64 *i) const {
439 i->f(0b111, 29, 27);
440 i->srf(_base, 5);
441
442 switch(_mode) {
443 case base_plus_offset:
444 {
445 unsigned size = i->get(31, 30);
537 size = 4 << size;
538 guarantee(_offset % size == 0, "bad offset");
539 i->sf(_offset / size, 21, 15);
540 i->srf(_base, 5);
541 }
542
543 void encode_nontemporal_pair(Instruction_aarch64 *i) const {
544 // Only base + offset is allowed
545 i->f(0b000, 25, 23);
546 unsigned size = i->get(31, 31);
547 size = 4 << size;
548 guarantee(_offset % size == 0, "bad offset");
549 i->sf(_offset / size, 21, 15);
550 i->srf(_base, 5);
551 guarantee(_mode == Address::base_plus_offset,
552 "Bad addressing mode for non-temporal op");
553 }
554
555 void lea(MacroAssembler *, Register) const;
556
557 static bool offset_ok_for_immed(int64_t offset, int shift) {
558 unsigned mask = (1 << shift) - 1;
559 if (offset < 0 || offset & mask) {
560 return (uabs(offset) < (1 << (20 - 12))); // Unscaled offset
561 } else {
562 return ((offset >> shift) < (1 << (21 - 10 + 1))); // Scaled, unsigned offset
563 }
564 }
565 };
566
567 // Convenience classes
568 class RuntimeAddress: public Address {
569
570 public:
571
572 RuntimeAddress(address target) : Address(target, relocInfo::runtime_call_type) {}
573
574 };
575
576 class OopAddress: public Address {
577
599
600 class InternalAddress: public Address {
601
602 public:
603
604 InternalAddress(address target) : Address(target, relocInfo::internal_word_type) {}
605 };
606
607 const int FPUStateSizeInWords = FloatRegisterImpl::number_of_registers *
608 FloatRegisterImpl::save_slots_per_register;
609
610 typedef enum {
611 PLDL1KEEP = 0b00000, PLDL1STRM, PLDL2KEEP, PLDL2STRM, PLDL3KEEP, PLDL3STRM,
612 PSTL1KEEP = 0b10000, PSTL1STRM, PSTL2KEEP, PSTL2STRM, PSTL3KEEP, PSTL3STRM,
613 PLIL1KEEP = 0b01000, PLIL1STRM, PLIL2KEEP, PLIL2STRM, PLIL3KEEP, PLIL3STRM
614 } prfop;
615
616 class Assembler : public AbstractAssembler {
617
618 #ifndef PRODUCT
619 static const uint64_t asm_bp;
620
621 void emit_long(jint x) {
622 if ((uint64_t)pc() == asm_bp)
623 asm volatile ("nop");
624 AbstractAssembler::emit_int32(x);
625 }
626 #else
627 void emit_long(jint x) {
628 AbstractAssembler::emit_int32(x);
629 }
630 #endif
631
632 public:
633
634 enum { instruction_size = 4 };
635
636 //---< calculate length of instruction >---
637 // We just use the values set above.
638 // instruction must start at passed address
639 static unsigned int instr_len(unsigned char *instr) { return instruction_size; }
640
641 //---< longest instructions >---
642 static unsigned int instr_maxlen() { return instruction_size; }
653 }
654
655 Address post(Register base, int offset) {
656 return adjust(base, offset, false);
657 }
658
659 Address post(Register base, Register idx) {
660 return Address(Post(base, idx));
661 }
662
663 Instruction_aarch64* current;
664
665 void set_current(Instruction_aarch64* i) { current = i; }
666
667 void f(unsigned val, int msb, int lsb) {
668 current->f(val, msb, lsb);
669 }
670 void f(unsigned val, int msb) {
671 current->f(val, msb, msb);
672 }
673 void sf(int64_t val, int msb, int lsb) {
674 current->sf(val, msb, lsb);
675 }
676 void rf(Register reg, int lsb) {
677 current->rf(reg, lsb);
678 }
679 void srf(Register reg, int lsb) {
680 current->srf(reg, lsb);
681 }
682 void zrf(Register reg, int lsb) {
683 current->zrf(reg, lsb);
684 }
685 void rf(FloatRegister reg, int lsb) {
686 current->rf(reg, lsb);
687 }
688 void fixed(unsigned value, unsigned mask) {
689 current->fixed(value, mask);
690 }
691
692 void emit() {
693 emit_long(current->get_insn());
703 void wrap_label(Label &L, uncond_branch_insn insn);
704 void wrap_label(Register r, Label &L, compare_and_branch_insn insn);
705 void wrap_label(Register r, int bitpos, Label &L, test_and_branch_insn insn);
706 void wrap_label(Label &L, prfop, prefetch_insn insn);
707
708 // PC-rel. addressing
709
710 void adr(Register Rd, address dest);
711 void _adrp(Register Rd, address dest);
712
713 void adr(Register Rd, const Address &dest);
714 void _adrp(Register Rd, const Address &dest);
715
716 void adr(Register Rd, Label &L) {
717 wrap_label(Rd, L, &Assembler::Assembler::adr);
718 }
719 void _adrp(Register Rd, Label &L) {
720 wrap_label(Rd, L, &Assembler::_adrp);
721 }
722
723 void adrp(Register Rd, const Address &dest, uint64_t &offset);
724
725 #undef INSN
726
727 void add_sub_immediate(Register Rd, Register Rn, unsigned uimm, int op,
728 int negated_op);
729
730 // Add/subtract (immediate)
731 #define INSN(NAME, decode, negated) \
732 void NAME(Register Rd, Register Rn, unsigned imm, unsigned shift) { \
733 starti; \
734 f(decode, 31, 29), f(0b10001, 28, 24), f(shift, 23, 22), f(imm, 21, 10); \
735 zrf(Rd, 0), srf(Rn, 5); \
736 } \
737 \
738 void NAME(Register Rd, Register Rn, unsigned imm) { \
739 starti; \
740 add_sub_immediate(Rd, Rn, imm, decode, negated); \
741 }
742
743 INSN(addsw, 0b001, 0b011);
829 #undef INSN
830
831 // Extract
832 #define INSN(NAME, opcode, size) \
833 void NAME(Register Rd, Register Rn, Register Rm, unsigned imms) { \
834 starti; \
835 guarantee(size == 1 || imms < 32, "incorrect imms"); \
836 f(opcode, 31, 21), f(imms, 15, 10); \
837 zrf(Rm, 16), zrf(Rn, 5), zrf(Rd, 0); \
838 }
839
840 INSN(extrw, 0b00010011100, 0);
841 INSN(extr, 0b10010011110, 1);
842
843 #undef INSN
844
845 // The maximum range of a branch is fixed for the AArch64
846 // architecture. In debug mode we shrink it in order to test
847 // trampolines, but not so small that branches in the interpreter
848 // are out of range.
849 static const uint64_t branch_range = NOT_DEBUG(128 * M) DEBUG_ONLY(2 * M);
850
851 static bool reachable_from_branch_at(address branch, address target) {
852 return uabs(target - branch) < branch_range;
853 }
854
855 // Unconditional branch (immediate)
856 #define INSN(NAME, opcode) \
857 void NAME(address dest) { \
858 starti; \
859 int64_t offset = (dest - pc()) >> 2; \
860 DEBUG_ONLY(assert(reachable_from_branch_at(pc(), dest), "debug only")); \
861 f(opcode, 31), f(0b00101, 30, 26), sf(offset, 25, 0); \
862 } \
863 void NAME(Label &L) { \
864 wrap_label(L, &Assembler::NAME); \
865 } \
866 void NAME(const Address &dest);
867
868 INSN(b, 0);
869 INSN(bl, 1);
870
871 #undef INSN
872
873 // Compare & branch (immediate)
874 #define INSN(NAME, opcode) \
875 void NAME(Register Rt, address dest) { \
876 int64_t offset = (dest - pc()) >> 2; \
877 starti; \
878 f(opcode, 31, 24), sf(offset, 23, 5), rf(Rt, 0); \
879 } \
880 void NAME(Register Rt, Label &L) { \
881 wrap_label(Rt, L, &Assembler::NAME); \
882 }
883
884 INSN(cbzw, 0b00110100);
885 INSN(cbnzw, 0b00110101);
886 INSN(cbz, 0b10110100);
887 INSN(cbnz, 0b10110101);
888
889 #undef INSN
890
891 // Test & branch (immediate)
892 #define INSN(NAME, opcode) \
893 void NAME(Register Rt, int bitpos, address dest) { \
894 int64_t offset = (dest - pc()) >> 2; \
895 int b5 = bitpos >> 5; \
896 bitpos &= 0x1f; \
897 starti; \
898 f(b5, 31), f(opcode, 30, 24), f(bitpos, 23, 19), sf(offset, 18, 5); \
899 rf(Rt, 0); \
900 } \
901 void NAME(Register Rt, int bitpos, Label &L) { \
902 wrap_label(Rt, bitpos, L, &Assembler::NAME); \
903 }
904
905 INSN(tbz, 0b0110110);
906 INSN(tbnz, 0b0110111);
907
908 #undef INSN
909
910 // Conditional branch (immediate)
911 enum Condition
912 {EQ, NE, HS, CS=HS, LO, CC=LO, MI, PL, VS, VC, HI, LS, GE, LT, GT, LE, AL, NV};
913
  // Emit B.cond: conditionally branch to dest.  Signed 19-bit word
  // offset (+/- 1 MB); the condition code occupies the low 4 bits.
  void br(Condition cond, address dest) {
    int64_t offset = (dest - pc()) >> 2;
    starti;
    f(0b0101010, 31, 25), f(0, 24), sf(offset, 23, 5), f(0, 4), f(cond, 3, 0);
  }
919
  // Mnemonic aliases for B.cond with a fixed condition code
  // (beq == br(EQ, dest), etc.).
#define INSN(NAME, cond) \
  void NAME(address dest) { \
    br(cond, dest); \
  }

  INSN(beq, EQ);
  INSN(bne, NE);
  INSN(bhs, HS);
  INSN(bcs, CS);
  INSN(blo, LO);
  INSN(bcc, CC);
  INSN(bmi, MI);
  INSN(bpl, PL);
  INSN(bvs, VS);
  INSN(bvc, VC);
  INSN(bhi, HI);
1275 void NAME_L(operand_size sz, Register Rs, Register Rt, Register Rn) { \
1276 lse_atomic(Rs, Rt, Rn, sz, op1, op2, false, true); \
1277 } \
1278 void NAME_AL(operand_size sz, Register Rs, Register Rt, Register Rn) {\
1279 lse_atomic(Rs, Rt, Rn, sz, op1, op2, true, true); \
1280 }
  // LSE atomic read-modify-write instructions.  Each line expands the
  // macro above into the plain, acquire (_A), release (_L) and
  // acquire+release (_AL) variants; the last argument is op2.
  INSN(ldadd, ldadda, ldaddl, ldaddal, 0, 0b000);      // atomic add
  INSN(ldbic, ldbica, ldbicl, ldbical, 0, 0b001);      // atomic bit clear (AND NOT)
  INSN(ldeor, ldeora, ldeorl, ldeoral, 0, 0b010);      // atomic exclusive OR
  INSN(ldorr, ldorra, ldorrl, ldorral, 0, 0b011);      // atomic OR
  INSN(ldsmax, ldsmaxa, ldsmaxl, ldsmaxal, 0, 0b100);  // atomic signed max
  INSN(ldsmin, ldsmina, ldsminl, ldsminal, 0, 0b101);  // atomic signed min
  INSN(ldumax, ldumaxa, ldumaxl, ldumaxal, 0, 0b110);  // atomic unsigned max
  INSN(ldumin, ldumina, lduminl, lduminal, 0, 0b111);  // atomic unsigned min
  INSN(swp, swpa, swpl, swpal, 1, 0b000);              // atomic swap (op1 = 1)
#undef INSN
1291
  // Load register (literal)
  // PC-relative loads with a signed 19-bit word offset (+/- 1 MB).
  // The reloc-taking overload records an internal-word relocation before
  // emitting; the Label overload defers emission until the label binds.
#define INSN(NAME, opc, V) \
  void NAME(Register Rt, address dest) { \
    int64_t offset = (dest - pc()) >> 2; \
    starti; \
    f(opc, 31, 30), f(0b011, 29, 27), f(V, 26), f(0b00, 25, 24), \
      sf(offset, 23, 5); \
    rf(Rt, 0); \
  } \
  void NAME(Register Rt, address dest, relocInfo::relocType rtype) { \
    InstructionMark im(this); \
    guarantee(rtype == relocInfo::internal_word_type, \
              "only internal_word_type relocs make sense here"); \
    code_section()->relocate(inst_mark(), InternalAddress(dest).rspec()); \
    NAME(Rt, dest); \
  } \
  void NAME(Register Rt, Label &L) { \
    wrap_label(Rt, L, &Assembler::NAME); \
  }

  INSN(ldrw, 0b00, 0);   // 32-bit load
  INSN(ldr, 0b01, 0);    // 64-bit load
  INSN(ldrsw, 0b10, 0);  // load 32 bits, sign-extend to 64

#undef INSN
1317
  // Load FP/SIMD register (literal), V = 1 variants of the form above.
  // The FP register number is simply reused in the Rt field, hence the
  // cast.
#define INSN(NAME, opc, V) \
  void NAME(FloatRegister Rt, address dest) { \
    int64_t offset = (dest - pc()) >> 2; \
    starti; \
    f(opc, 31, 30), f(0b011, 29, 27), f(V, 26), f(0b00, 25, 24), \
      sf(offset, 23, 5); \
    rf((Register)Rt, 0); \
  }

  INSN(ldrs, 0b00, 1);  // 32-bit (S register)
  INSN(ldrd, 0b01, 1);  // 64-bit (D register)
  INSN(ldrq, 0b10, 1);  // 128-bit (Q register)

#undef INSN
1332
  // Prefetch (literal): hint the memory system about a pc-relative
  // address; `op` selects the prefetch operation (default PLDL1KEEP).
#define INSN(NAME, opc, V) \
  void NAME(address dest, prfop op = PLDL1KEEP) { \
    int64_t offset = (dest - pc()) >> 2; \
    starti; \
    f(opc, 31, 30), f(0b011, 29, 27), f(V, 26), f(0b00, 25, 24), \
      sf(offset, 23, 5); \
    f(op, 4, 0); \
  } \
  void NAME(Label &L, prfop op = PLDL1KEEP) { \
    wrap_label(L, op, &Assembler::NAME); \
  }

  INSN(prfm, 0b11, 0);

#undef INSN
1348
1349 // Load/store
1350 void ld_st1(int opc, int p1, int V, int L,
1351 Register Rt1, Register Rt2, Address adr, bool no_allocate) {
1352 starti;
1353 f(opc, 31, 30), f(p1, 29, 27), f(V, 26), f(L, 22);
1354 zrf(Rt2, 10), zrf(Rt1, 0);
1355 if (no_allocate) {
  // 128-bit (Q register) pair forms; the fourth argument is the L field
  // (0 = store, 1 = load) passed through to ld_st1.
  INSN(stpq, 0b10, 0b101, 1, 0, false);
  INSN(ldpq, 0b10, 0b101, 1, 1, false);

#undef INSN
1395
  // Load/store register (all modes)
  // Emit a single-register load/store of Rt through any addressing mode
  // carried by `adr`.  `size` fills the 2-bit operand-size field (31:30),
  // `op` the 2-bit opcode field (23:22); V = 1 selects the FP/SIMD form.
  void ld_st2(Register Rt, const Address &adr, int size, int op, int V = 0) {
    starti;

    f(V, 26); // general reg?
    zrf(Rt, 0);

    // Encoding for literal loads is done here (rather than pushed
    // down into Address::encode) because the encoding of this
    // instruction is too different from all of the other forms to
    // make it worth sharing.
    if (adr.getMode() == Address::literal) {
      assert(size == 0b10 || size == 0b11, "bad operand size in ldr");
      assert(op == 0b01, "literal form can only be used with loads");
      // Literal opc field is 0b00 for 32-bit and 0b01 for 64-bit loads,
      // i.e. just the low bit of `size`.
      f(size & 0b01, 31, 30), f(0b011, 29, 27), f(0b00, 25, 24);
      int64_t offset = (adr.target() - pc()) >> 2;
      sf(offset, 23, 5);
      code_section()->relocate(pc(), adr.rspec());
      return;
    }

    f(size, 31, 30);
    f(op, 23, 22); // str
    adr.encode(current);
  }
1421
  // Integer stores in all addressing modes, built on ld_st2.
#define INSN(NAME, size, op) \
  void NAME(Register Rt, const Address &adr) { \
    ld_st2(Rt, adr, size, op); \
  } \

  INSN(str, 0b11, 0b00);   // 64-bit store
  INSN(strw, 0b10, 0b00);  // 32-bit store
  INSN(strb, 0b00, 0b00);  // byte store
  INSN(strh, 0b01, 0b00);  // halfword store
2666 assert((T == T8B && index <= 0b0111) || (T == T16B && index <= 0b1111), "Invalid index value");
2667 f(0, 31), f((int)T & 1, 30), f(0b101110000, 29, 21);
2668 rf(Vm, 16), f(0, 15), f(index, 14, 11);
2669 f(0, 10), rf(Vn, 5), rf(Vd, 0);
2670 }
2671
  // Construct an assembler that emits into the given code buffer.
  Assembler(CodeBuffer* code) : AbstractAssembler(code) {
  }
2674
  // Unsupported here: reaching this is a fatal error (ShouldNotCallThis).
  // The return statement is unreachable and only satisfies the compiler.
  virtual RegisterOrConstant delayed_value_impl(intptr_t* delayed_value_addr,
                                                Register tmp,
                                                int offset) {
    ShouldNotCallThis();
    return RegisterOrConstant();
  }
2681
  // Stack overflow checking
  virtual void bang_stack_with_offset(int offset);

  // Immediate-encoding predicates: true when `imm` can be encoded
  // directly in the corresponding instruction form.
  static bool operand_valid_for_logical_immediate(bool is32, uint64_t imm);
  static bool operand_valid_for_add_sub_immediate(int64_t imm);
  static bool operand_valid_for_float_immediate(double imm);

  // Emit a 64-bit data word, attaching the given relocation.
  void emit_data64(jlong data, relocInfo::relocType rtype, int format = 0);
  void emit_data64(jlong data, RelocationHolder const& rspec, int format = 0);
2691 };
2692
2693 inline Assembler::Membar_mask_bits operator|(Assembler::Membar_mask_bits a,
2694 Assembler::Membar_mask_bits b) {
2695 return Assembler::Membar_mask_bits(unsigned(a)|unsigned(b));
2696 }
2697
// On scope exit, hand the accumulated instruction to the assembler to
// emit; this is what makes the starti pattern work.
Instruction_aarch64::~Instruction_aarch64() {
  assem->emit();
}
2701
2702 #undef starti
2703
2704 // Invert a condition
2705 inline const Assembler::Condition operator~(const Assembler::Condition cond) {
2706 return Assembler::Condition(int(cond) ^ 1);
|