< prev index next >

src/cpu/aarch32/vm/macroAssembler_aarch32.hpp

Print this page
rev 8069 : 8164652: aarch32: C1 port


 142 //TODO Probably add back the 64-bit one as it will be useful for longs
 143 private:
 144 
 145   int push(unsigned int bitset, Register stack);
 146   int pop(unsigned int bitset, Register stack);
 147 
 148 public:
 149 
 150   void mov(Register dst, Address a, Condition cond = C_DFLT);
 151 
 152 
       // Convenience wrappers around the private bitset push/pop above.
       // A no-op when the RegSet is empty, so callers need not special-case it.
 153   void push(RegSet regs, Register stack) { if (regs.bits()) push(regs.bits(), stack); }
 154   void pop(RegSet regs, Register stack) { if (regs.bits()) pop(regs.bits(), stack); }
 155 
 156   // now mov instructions for loading absolute addresses and 32bit immediates
 157 
 158   inline void mov(Register dst, address addr, Condition cond = C_DFLT) {
 159     // TODO: Do Address end up as address and then passing through this method, after
 160     // being marked for relocation elsewhere? If not (as I suspect) then this can
 161     // be relaxed to mov_immediate to potentially produce shorter code sequences.
         // Always emits the full 32-bit form (mov_immediate32) so the value can
         // be patched later; the narrowing cast is fine on this 32-bit target.
         // NOTE(review): u_int32_t is a non-standard typedef; the updated
         // revision of this file spells it uint32_t.
 162     mov_immediate32(dst, (u_int32_t)addr, cond, false);
 163   }
 164
         // long / unsigned long / int all funnel into the u_int32_t overload
         // below (all 32 bits wide on this target).
 165   inline void mov(Register dst, long l, Condition cond = C_DFLT) {
 166     mov(dst, (u_int32_t)l, cond);
 167   }
 168   inline void mov(Register dst, unsigned long l, Condition cond = C_DFLT) {
 169     mov(dst, (u_int32_t)l, cond);
 170   }
 171   inline void mov(Register dst, int i, Condition cond = C_DFLT) {
 172     mov(dst, (u_int32_t)i, cond);
 173   }
         // Single point that actually emits the immediate move; mov_immediate
         // is declared elsewhere and presumably may choose a shorter encoding
         // than the fixed 32-bit sequence — TODO confirm.
 174   inline void mov(Register dst, u_int32_t i, Condition cond = C_DFLT) {
 175     mov_immediate(dst, i, cond, false);
 176   }
 177 
         // Register-to-register moves delegate directly to the Assembler
         // encodings, optionally applying a shift to the source operand.
 178   inline void mov(Register dst, Register src, Condition cond = C_DFLT) {
 179     Assembler::mov(dst, src, cond);
 180   }
 181   inline void mov(Register dst, Register src, shift_op shift,
 182                   Condition cond = C_DFLT) {
 183     Assembler::mov(dst, src, shift, cond);
 184   }
 185   // TODO add sflag compatibility
 186   void movptr(Register r, uintptr_t imm32, Condition cond = C_DFLT);
 187 
 188   void ret(Register reg);
 189 
 190   // Both of these are aarch64 instructions that can easily be emulated
 191   // Note that this does not quite have the same semantics as aarch64
 192   // version as this updates the s flag.
 193   void cbz(Register r, Label& l) {
 194     cmp(r, 0);


 573   void warn(const char* msg);
 574 
 575   static void debug32(char* msg, int32_t pc, int32_t regs[]);
 576 
       // Debugging aids: both halt the generated code via stop().
 577   void untested()                                { stop("untested"); }
 578
       // The 1K buffer is deliberately not freed: stop() is a terminal
       // debugging aid, so the leak is harmless here. NOTE(review): confirm
       // stop() does not return to the caller.
 579   void unimplemented(const char* what = "")      { char* b = new char[1024];  jio_snprintf(b, 1024, "unimplemented: %s", what);  stop(b); }
 580 
       // Emits code that prints "<file>: <line>" and then halts; the macro
       // captures the call site automatically. Clobbers rscratch1 (it holds
       // the line number for the register-printf).
 581 #define should_not_reach_here() should_not_reach_here_line(__FILE__, __LINE__)
 582   void should_not_reach_here_line(const char *file, int line) {
 583     mov(rscratch1, line);
 584     reg_printf_important(file);
 585     reg_printf_important(": %d", rscratch1);
 586     stop("should_not_reach_here");
 587   }
 588 
 589   // Stack overflow checking
       // Touch the stack page 'offset' bytes below sp so a stack overflow
       // faults deterministically. Clobbers rscratch2.
 590   void bang_stack_with_offset(int offset) {
 591     // stack grows down, caller passes positive offset
         // NOTE(review): the assert message says "negative offset" but the
         // condition requires a positive one — message and check disagree.
 592     assert(offset > 0, "must bang with negative offset");
 593     mov(rscratch2, -offset);
 594     // bang with random number from r0
         // NOTE(review): this relies on the base+index Address form treating
         // the (negative) value in rscratch2 as a signed index — verify the
         // encoding; the updated revision of this file switches to an explicit
         // Address::SUB form instead.
 595     str(r0, Address(sp, rscratch2));




 596   }
 597 
 598   // Writes to stack successive pages until offset reached to check for
 599   // stack overflow + shadow pages.  Also, clobbers tmp
 600   void bang_stack_size(Register size, Register tmp);
 601 
 602   virtual RegisterOrConstant delayed_value_impl(intptr_t* delayed_value_addr,
 603                                                 Register tmp,
 604                                                 int offset);
 605 
 606   // Support for serializing memory accesses between threads
 607   void serialize_memory(Register thread, Register tmp);
 608 
 609   // Arithmetics
 610 
 611   void addptr(Address dst, int32_t src) {
 612     lea(rscratch2, dst);
 613     ldr(rscratch1, Address(rscratch2));
 614     add(rscratch1, rscratch1, src);
 615     str(rscratch1, Address(rscratch2));


 636     else
 637       orr(rscratch2, rscratch2, src.as_constant());
 638     str(rscratch2, adr);
 639   }
 640 
 641   // Calls
 642 
 643   void trampoline_call(Address entry, CodeBuffer *cbuf = NULL);
 644 
       // True when the code cache may exceed a single branch's reach, so
       // calls/jumps must use the far (multi-instruction) form.
 645   static bool far_branches() {
 646     return ReservedCodeCacheSize > branch_range;
 647   }
 648 
 649   // Jumps that can reach anywhere in the code cache.
 650   // Trashes tmp.
 651   void far_call(Address entry, CodeBuffer *cbuf = NULL, Register tmp = rscratch1);
 652   void far_jump(Address entry, CodeBuffer *cbuf = NULL, Register tmp = rscratch1);
 653 
       // Worst-case size in bytes reserved for a far branch.
       // NOTE(review): always reserves the 3-instruction far form even when
       // far_branches() is false — the updated revision of this file returns
       // 4 in that case.
 654   static int far_branch_size() {
 655     // TODO performance issue: always generate real far jumps

 656     return 3 * 4;  // movw, movt, br

 657   }
 658 
 659   // Emit the CompiledIC call idiom
 660   void ic_call(address entry);
 661 
 662 public:
 663   // Data
 664   void mov_metadata(Register dst, Metadata* obj);
 665   Address allocate_metadata_address(Metadata* obj);
 666   Address constant_oop_address(jobject obj);
 667 
 668   void movoop(Register dst, jobject obj, bool immediate = false);
 669 
 670   // CRC32 code for java.util.zip.CRC32::updateBytes() intrinsic.
 671   void kernel_crc32(Register crc, Register buf, Register len,
 672         Register table0, Register table1, Register table2, Register table3,
 673         Register tmp, Register tmp2, Register tmp3);
 674 
 675 #undef VIRTUAL
 676 




 142 //TODO Probably add back the 64-bit one as it will be useful for longs
 143 private:
 144 
 145   int push(unsigned int bitset, Register stack);
 146   int pop(unsigned int bitset, Register stack);
 147 
 148 public:
 149 
 150   void mov(Register dst, Address a, Condition cond = C_DFLT);
 151 
 152 
       // Convenience wrappers around the private bitset push/pop above.
       // A no-op when the RegSet is empty, so callers need not special-case it.
 153   void push(RegSet regs, Register stack) { if (regs.bits()) push(regs.bits(), stack); }
 154   void pop(RegSet regs, Register stack) { if (regs.bits()) pop(regs.bits(), stack); }
 155 
 156   // now mov instructions for loading absolute addresses and 32bit immediates
 157 
 158   inline void mov(Register dst, address addr, Condition cond = C_DFLT) {
 159     // TODO: Do Address end up as address and then passing through this method, after
 160     // being marked for relocation elsewhere? If not (as I suspect) then this can
 161     // be relaxed to mov_immediate to potentially produce shorter code sequences.
         // Always emits the full 32-bit form (mov_immediate32) so the value can
         // be patched later; the narrowing cast is fine on this 32-bit target.
 162     mov_immediate32(dst, (uint32_t)addr, cond, false);
 163   }
 164
         // long / unsigned long / int all funnel into the uint32_t overload
         // below (all 32 bits wide on this target).
 165   inline void mov(Register dst, long l, Condition cond = C_DFLT) {
 166     mov(dst, (uint32_t)l, cond);
 167   }
 168   inline void mov(Register dst, unsigned long l, Condition cond = C_DFLT) {
 169     mov(dst, (uint32_t)l, cond);
 170   }
 171   inline void mov(Register dst, int i, Condition cond = C_DFLT) {
 172     mov(dst, (uint32_t)i, cond);
 173   }
         // Single point that actually emits the immediate move; mov_immediate
         // is declared elsewhere and presumably may choose a shorter encoding
         // than the fixed 32-bit sequence — TODO confirm.
 174   inline void mov(Register dst, uint32_t i, Condition cond = C_DFLT) {
 175     mov_immediate(dst, i, cond, false);
 176   }
 177 
         // Register-to-register moves delegate directly to the Assembler
         // encodings, optionally applying a shift to the source operand.
 178   inline void mov(Register dst, Register src, Condition cond = C_DFLT) {
 179     Assembler::mov(dst, src, cond);
 180   }
 181   inline void mov(Register dst, Register src, shift_op shift,
 182                   Condition cond = C_DFLT) {
 183     Assembler::mov(dst, src, shift, cond);
 184   }
 185   // TODO add sflag compatibility
 186   void movptr(Register r, uintptr_t imm32, Condition cond = C_DFLT);
 187 
 188   void ret(Register reg);
 189 
 190   // Both of these are aarch64 instructions that can easily be emulated
 191   // Note that this does not quite have the same semantics as aarch64
 192   // version as this updates the s flag.
 193   void cbz(Register r, Label& l) {
 194     cmp(r, 0);


 573   void warn(const char* msg);
 574 
 575   static void debug32(char* msg, int32_t pc, int32_t regs[]);
 576 
       // Debugging aids: both halt the generated code via stop().
 577   void untested()                                { stop("untested"); }
 578
       // The 1K buffer is deliberately not freed: stop() is a terminal
       // debugging aid, so the leak is harmless here. NOTE(review): confirm
       // stop() does not return to the caller.
 579   void unimplemented(const char* what = "")      { char* b = new char[1024];  jio_snprintf(b, 1024, "unimplemented: %s", what);  stop(b); }
 580 
       // Emits code that prints "<file>: <line>" and then halts; the macro
       // captures the call site automatically. Clobbers rscratch1 (it holds
       // the line number for the register-printf).
 581 #define should_not_reach_here() should_not_reach_here_line(__FILE__, __LINE__)
 582   void should_not_reach_here_line(const char *file, int line) {
 583     mov(rscratch1, line);
 584     reg_printf_important(file);
 585     reg_printf_important(": %d", rscratch1);
 586     stop("should_not_reach_here");
 587   }
 588 
 589   // Stack overflow checking
       // Touch the stack page 'offset' bytes below sp so a stack overflow
       // faults deterministically. A byte store is enough to touch the page.
       // Clobbers rscratch2.
 590   void bang_stack_with_offset(int offset) {
 591     // stack grows down, caller passes positive offset
         // NOTE(review): the assert message says "negative offset" but the
         // condition requires a positive one — message and check disagree.
 592     assert(offset > 0, "must bang with negative offset");
 593     // bang with random value from r0
         // Fast path: offset encodes as an add/sub immediate, so compute
         // sp - offset directly into rscratch2.
 594     if (operand_valid_for_add_sub_immediate(offset)) {
 595       sub(rscratch2, sp, offset);
 596       strb(r0, Address(rscratch2));
 597     } else {
         // Slow path: materialize the (positive) offset and subtract it via
         // the explicit Address::SUB register-offset addressing form.
 598       mov(rscratch2, offset);
 599       strb(r0, Address(sp, rscratch2, Assembler::lsl(), Address::SUB));
 600     }
 601   }
 602 
 603   // Writes to stack successive pages until offset reached to check for
 604   // stack overflow + shadow pages.  Also, clobbers tmp
 605   void bang_stack_size(Register size, Register tmp);
 606 
 607   virtual RegisterOrConstant delayed_value_impl(intptr_t* delayed_value_addr,
 608                                                 Register tmp,
 609                                                 int offset);
 610 
 611   // Support for serializing memory accesses between threads
 612   void serialize_memory(Register thread, Register tmp);
 613 
 614   // Arithmetics
 615 
 616   void addptr(Address dst, int32_t src) {
 617     lea(rscratch2, dst);
 618     ldr(rscratch1, Address(rscratch2));
 619     add(rscratch1, rscratch1, src);
 620     str(rscratch1, Address(rscratch2));


 641     else
 642       orr(rscratch2, rscratch2, src.as_constant());
 643     str(rscratch2, adr);
 644   }
 645 
 646   // Calls
 647 
 648   void trampoline_call(Address entry, CodeBuffer *cbuf = NULL);
 649 
       // True when the code cache may exceed a single branch's reach, so
       // calls/jumps must use the far (multi-instruction) form.
 650   static bool far_branches() {
 651     return ReservedCodeCacheSize > branch_range;
 652   }
 653 
 654   // Jumps that can reach anywhere in the code cache.
 655   // Trashes tmp.
 656   void far_call(Address entry, CodeBuffer *cbuf = NULL, Register tmp = rscratch1);
 657   void far_jump(Address entry, CodeBuffer *cbuf = NULL, Register tmp = rscratch1);
 658 
       // Size in bytes reserved for a branch that must reach anywhere in the
       // code cache: 12 bytes (movw/movt + branch) when far branches are
       // required, otherwise a single 4-byte branch.
 659   static int far_branch_size() {
 660     // TODO performance issue: always generate real far jumps
 661     if (far_branches()) {
 662       return 3 * 4;  // movw, movt, br
 663     } else {
 664       return 4;
 665     }
 666   }
 667 
 668   // Emit the CompiledIC call idiom
 669   void ic_call(address entry);
 670 
 671 public:
 672   // Data
 673   void mov_metadata(Register dst, Metadata* obj);
 674   Address allocate_metadata_address(Metadata* obj);
 675   Address constant_oop_address(jobject obj);
 676 
 677   void movoop(Register dst, jobject obj, bool immediate = false);
 678 
 679   // CRC32 code for java.util.zip.CRC32::updateBytes() intrinsic.
 680   void kernel_crc32(Register crc, Register buf, Register len,
 681         Register table0, Register table1, Register table2, Register table3,
 682         Register tmp, Register tmp2, Register tmp3);
 683 
 684 #undef VIRTUAL
 685 


< prev index next >