175 void membar(Membar_mask_bits order_constraint);
176
// Bring the base Assembler overloads into scope so the Address-taking
// variants declared below overload them instead of hiding them.
177 using Assembler::ldr;
178 using Assembler::str;
179 using Assembler::ldrw;
180 using Assembler::strw;
181
// 64-bit (ldr/str) and 32-bit (ldrw/strw) load/store accepting a full
// Address; declared here, defined out of line.
182 void ldr(Register Rx, const Address &adr);
183 void ldrw(Register Rw, const Address &adr);
184 void str(Register Rx, const Address &adr);
185 void strw(Register Rx, const Address &adr);
186
187 // Frame creation and destruction shared between JITs.
188 void build_frame(int framesize);
189 void remove_frame(int framesize);
190
191 virtual void _call_Unimplemented(address call_site) {
192 mov(rscratch2, call_site);
193 }
194
195 #define call_Unimplemented() _call_Unimplemented((address)__PRETTY_FUNCTION__)
196
197 // aliases defined in AARCH64 spec
198
// 32-bit compare: SUBSW with the result discarded into zr (flags only).
199 template<class T>
200 inline void cmpw(Register Rd, T imm) { subsw(zr, Rd, imm); }
201
202 inline void cmp(Register Rd, unsigned char imm8) { subs(zr, Rd, imm8); }
203 inline void cmp(Register Rd, unsigned imm) __attribute__ ((deprecated));
204
// CMN alias: compare-negative — ADDS with the result discarded into zr.
205 inline void cmnw(Register Rd, unsigned imm) { addsw(zr, Rd, imm); }
206 inline void cmn(Register Rd, unsigned imm) { adds(zr, Rd, imm); }
207
// CSET alias: Rd = 1 when cond holds, else 0 — CSINC of zr with the
// inverted condition (~cond is assumed to yield the inverse condition,
// as in tbr() below).
208 void cset(Register Rd, Assembler::Condition cond) {
209 csinc(Rd, zr, zr, ~cond);
210 }
211 void csetw(Register Rd, Assembler::Condition cond) {
212 csincw(Rd, zr, zr, ~cond);
213 }
214
// CNEG alias: Rd = -Rn when cond holds, else Rn — CSNEG Rn, Rn with the
// inverted condition.
215 void cneg(Register Rd, Register Rn, Assembler::Condition cond) {
216 csneg(Rd, Rn, Rn, ~cond);
217 }
218 void cnegw(Register Rd, Register Rn, Assembler::Condition cond) {
219 csnegw(Rd, Rn, Rn, ~cond);
220 }
221
221
222 inline void movw(Register Rd, Register Rn) {
223 if (Rd == sp || Rn == sp) {
457 private:
458
// Materialize a 64-/32-bit immediate into dst.
459 void mov_immediate64(Register dst, uint64_t imm64);
460 void mov_immediate32(Register dst, uint32_t imm32);
461
// Push/pop the registers named by 'bitset' relative to 'stack';
// these are the workers behind the public RegSet overloads.
462 int push(unsigned int bitset, Register stack);
463 int pop(unsigned int bitset, Register stack);
464
// Same, for floating-point/SIMD registers.
465 int push_fp(unsigned int bitset, Register stack);
466 int pop_fp(unsigned int bitset, Register stack);
467
468 void mov(Register dst, Address a);
469
470 public:
// Public wrappers: do nothing when the register set is empty.
471 void push(RegSet regs, Register stack) { if (regs.bits()) push(regs.bits(), stack); }
472 void pop(RegSet regs, Register stack) { if (regs.bits()) pop(regs.bits(), stack); }
473
474 void push_fp(RegSet regs, Register stack) { if (regs.bits()) push_fp(regs.bits(), stack); }
475 void pop_fp(RegSet regs, Register stack) { if (regs.bits()) pop_fp(regs.bits(), stack); }
476
476
477 // Push and pop everything that might be clobbered by a native
478 // runtime call except rscratch1 and rscratch2. (They are always
479 // scratch, so we don't have to protect them.) Only save the lower
480 // 64 bits of each vector register. Additional registers can be excluded
481 // in a passed RegSet.
482 void push_call_clobbered_registers_except(RegSet exclude);
483 void pop_call_clobbered_registers_except(RegSet exclude);
484
// Convenience forms with an empty exclusion set.
485 void push_call_clobbered_registers() {
486 push_call_clobbered_registers_except(RegSet());
487 }
488 void pop_call_clobbered_registers() {
489 pop_call_clobbered_registers_except(RegSet());
490 }
491
492
491
492
493 // now mov instructions for loading absolute addresses and 32 or
494 // 64 bit integers
495
496 inline void mov(Register dst, address addr)
520
521 void mov(Register dst, RegisterOrConstant src) {
// Dispatch on whether 'src' wraps a register or a constant.
522 if (src.is_register())
523 mov(dst, src.as_register());
524 else
525 mov(dst, src.as_constant());
526 }
527
// Materialize a pointer-sized (64-bit) immediate.
528 void movptr(Register r, uintptr_t imm64);
529
// Immediate into a vector register — presumably replicated per the
// arrangement T; TODO confirm against the definition.
530 void mov(FloatRegister Vd, SIMD_Arrangement T, uint32_t imm32);
531
// Vector register copy, expressed as ORR Vd, Vn, Vn.
532 void mov(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn) {
533 orr(Vd, T, Vn, Vn);
534 }
535
535
536 public:
537
538 // Generalized Test Bit And Branch, including a "far" variety which
539 // spans more than 32KiB.
540 void tbr(Condition cond, Register Rt, int bitpos, Label &dest, bool far = false) {
541 assert(cond == EQ || cond == NE, "must be");
542
543 if (far)
544 cond = ~cond;
545
546 void (Assembler::* branch)(Register Rt, int bitpos, Label &L);
547 if (cond == Assembler::EQ)
548 branch = &Assembler::tbz;
549 else
550 branch = &Assembler::tbnz;
551
552 if (far) {
553 Label L;
554 (this->*branch)(Rt, bitpos, L);
555 b(dest);
556 bind(L);
557 } else {
558 (this->*branch)(Rt, bitpos, dest);
559 }
560 }
561
562 // macro instructions for accessing and updating floating point
563 // status register
564 //
565 // FPSR : op1 == 011
566 // CRn == 0100
567 // CRm == 0100
568 // op2 == 001
569
570 inline void get_fpsr(Register reg)
571 {
572 mrs(0b11, 0b0100, 0b0100, 0b001, reg);
|
175 void membar(Membar_mask_bits order_constraint);
176
// Bring the base Assembler overloads into scope so the Address-taking
// variants declared below overload them instead of hiding them.
177 using Assembler::ldr;
178 using Assembler::str;
179 using Assembler::ldrw;
180 using Assembler::strw;
181
// 64-bit (ldr/str) and 32-bit (ldrw/strw) load/store accepting a full
// Address; declared here, defined out of line.
182 void ldr(Register Rx, const Address &adr);
183 void ldrw(Register Rw, const Address &adr);
184 void str(Register Rx, const Address &adr);
185 void strw(Register Rx, const Address &adr);
186
187 // Frame creation and destruction shared between JITs.
188 void build_frame(int framesize);
189 void remove_frame(int framesize);
190
// Stub for calls that are not yet implemented: loads the call-site
// identification string into rscratch2 so the site is identifiable.
191 virtual void _call_Unimplemented(address call_site) {
192 mov(rscratch2, call_site);
193 }
194
195 // Microsoft's MSVC team thinks that the __FUNCSIG__ is approximately (sympathy for calling conventions) equivalent to __PRETTY_FUNCTION__
196 // Also, from Clang patch: "It is very similar to GCC's PRETTY_FUNCTION, except it prints the calling convention."
197 // https://reviews.llvm.org/D3311
198
// __PRETTY_FUNCTION__ is a GCC/Clang extension; use MSVC's __FUNCSIG__
// equivalent when building for Win64.
199 #ifdef _WIN64
200 #define call_Unimplemented() _call_Unimplemented((address)__FUNCSIG__)
201 #else
202 #define call_Unimplemented() _call_Unimplemented((address)__PRETTY_FUNCTION__)
203 #endif
204
204
205 // aliases defined in AARCH64 spec
206
// 32-bit compare: SUBSW with the result discarded into zr (flags only).
207 template<class T>
208 inline void cmpw(Register Rd, T imm) { subsw(zr, Rd, imm); }
209
// 64-bit compare against an 8-bit unsigned immediate (SUBS into zr).
210 inline void cmp(Register Rd, unsigned char imm8) { subs(zr, Rd, imm8); }
// Deleted so that only the imm8 overload is callable; using a wider
// immediate is a hard compile-time error on every toolchain.
211 inline void cmp(Register Rd, unsigned imm) = delete;
212
// CMN alias: compare-negative — ADDS with the result discarded into zr.
213 inline void cmnw(Register Rd, unsigned imm) { addsw(zr, Rd, imm); }
214 inline void cmn(Register Rd, unsigned imm) { adds(zr, Rd, imm); }
215
215
// CSET alias: Rd = 1 when cond holds, else 0 — CSINC of zr with the
// inverted condition (~cond is assumed to yield the inverse condition,
// as in tbr() below).
216 void cset(Register Rd, Assembler::Condition cond) {
217 csinc(Rd, zr, zr, ~cond);
218 }
219 void csetw(Register Rd, Assembler::Condition cond) {
220 csincw(Rd, zr, zr, ~cond);
221 }
222
// CNEG alias: Rd = -Rn when cond holds, else Rn — CSNEG Rn, Rn with the
// inverted condition.
223 void cneg(Register Rd, Register Rn, Assembler::Condition cond) {
224 csneg(Rd, Rn, Rn, ~cond);
225 }
226 void cnegw(Register Rd, Register Rn, Assembler::Condition cond) {
227 csnegw(Rd, Rn, Rn, ~cond);
228 }
229
229
230 inline void movw(Register Rd, Register Rn) {
231 if (Rd == sp || Rn == sp) {
465 private:
466
// Materialize a 64-/32-bit immediate into dst.
467 void mov_immediate64(Register dst, uint64_t imm64);
468 void mov_immediate32(Register dst, uint32_t imm32);
469
// Push/pop the registers named by 'bitset' relative to 'stack';
// these are the workers behind the public RegSet overloads.
470 int push(unsigned int bitset, Register stack);
471 int pop(unsigned int bitset, Register stack);
472
// Same, for floating-point/SIMD registers.
473 int push_fp(unsigned int bitset, Register stack);
474 int pop_fp(unsigned int bitset, Register stack);
475
476 void mov(Register dst, Address a);
477
478 public:
// Public wrappers: do nothing when the register set is empty.
479 void push(RegSet regs, Register stack) { if (regs.bits()) push(regs.bits(), stack); }
480 void pop(RegSet regs, Register stack) { if (regs.bits()) pop(regs.bits(), stack); }
481
482 void push_fp(RegSet regs, Register stack) { if (regs.bits()) push_fp(regs.bits(), stack); }
483 void pop_fp(RegSet regs, Register stack) { if (regs.bits()) pop_fp(regs.bits(), stack); }
484
484
// The set of registers the routines below treat as call-clobbered.
485 static RegSet call_clobbered_registers();
486
487 // Push and pop everything that might be clobbered by a native
488 // runtime call except rscratch1 and rscratch2. (They are always
489 // scratch, so we don't have to protect them.) Only save the lower
490 // 64 bits of each vector register. Additional registers can be excluded
491 // in a passed RegSet.
492 void push_call_clobbered_registers_except(RegSet exclude);
493 void pop_call_clobbered_registers_except(RegSet exclude);
494
// Convenience forms with an empty exclusion set.
495 void push_call_clobbered_registers() {
496 push_call_clobbered_registers_except(RegSet());
497 }
498 void pop_call_clobbered_registers() {
499 pop_call_clobbered_registers_except(RegSet());
500 }
501
502
501
502
503 // now mov instructions for loading absolute addresses and 32 or
504 // 64 bit integers
505
506 inline void mov(Register dst, address addr)
530
531 void mov(Register dst, RegisterOrConstant src) {
// Dispatch on whether 'src' wraps a register or a constant.
532 if (src.is_register())
533 mov(dst, src.as_register());
534 else
535 mov(dst, src.as_constant());
536 }
537
// Materialize a pointer-sized (64-bit) immediate.
538 void movptr(Register r, uintptr_t imm64);
539
// Immediate into a vector register — presumably replicated per the
// arrangement T; TODO confirm against the definition.
540 void mov(FloatRegister Vd, SIMD_Arrangement T, uint32_t imm32);
541
// Vector register copy, expressed as ORR Vd, Vn, Vn.
542 void mov(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn) {
543 orr(Vd, T, Vn, Vn);
544 }
545
545
546 public:
547
548 // Generalized Test Bit And Branch, including a "far" variety which
549 // spans more than 32KiB.
550 void tbr(Condition cond, Register Rt, int bitpos, Label &dest, bool isfar = false) {
551 assert(cond == EQ || cond == NE, "must be");
552
553 if (isfar)
554 cond = ~cond;
555
556 void (Assembler::* branch)(Register Rt, int bitpos, Label &L);
557 if (cond == Assembler::EQ)
558 branch = &Assembler::tbz;
559 else
560 branch = &Assembler::tbnz;
561
562 if (isfar) {
563 Label L;
564 (this->*branch)(Rt, bitpos, L);
565 b(dest);
566 bind(L);
567 } else {
568 (this->*branch)(Rt, bitpos, dest);
569 }
570 }
571
572 // macro instructions for accessing and updating floating point
573 // status register
574 //
575 // FPSR : op1 == 011
576 // CRn == 0100
577 // CRm == 0100
578 // op2 == 001
579
580 inline void get_fpsr(Register reg)
581 {
582 mrs(0b11, 0b0100, 0b0100, 0b001, reg);
|