< prev index next >

src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp

Print this page




 460   void mov_immediate32(Register dst, uint32_t imm32);
 461 
 462   int push(unsigned int bitset, Register stack);
 463   int pop(unsigned int bitset, Register stack);
 464 
 465   int push_fp(unsigned int bitset, Register stack);
 466   int pop_fp(unsigned int bitset, Register stack);
 467 
 468   void mov(Register dst, Address a);
 469 
 470 public:
 471   void push(RegSet regs, Register stack) { if (regs.bits()) push(regs.bits(), stack); }  // delegates to the bitset overload; no-op for an empty RegSet
 472   void pop(RegSet regs, Register stack) { if (regs.bits()) pop(regs.bits(), stack); }  // delegates to the bitset overload; no-op for an empty RegSet
 473 
 474   void push_fp(RegSet regs, Register stack) { if (regs.bits()) push_fp(regs.bits(), stack); }  // FP-register variant; no-op for an empty RegSet
 475   void pop_fp(RegSet regs, Register stack) { if (regs.bits()) pop_fp(regs.bits(), stack); }  // FP-register variant; no-op for an empty RegSet
 476 
 477   // Push and pop everything that might be clobbered by a native
 478   // runtime call except rscratch1 and rscratch2.  (They are always
 479   // scratch, so we don't have to protect them.)  Only save the lower
 480   // 64 bits of each vector register.
 481   void push_call_clobbered_registers();
 482   void pop_call_clobbered_registers();









 483 
 484   // now mov instructions for loading absolute addresses and 32 or
 485   // 64 bit integers
 486 
 487   inline void mov(Register dst, address addr)  // materialize an absolute address as a 64-bit immediate
 488   {
 489     mov_immediate64(dst, (uint64_t)addr);
 490   }
 491 
 492   inline void mov(Register dst, uint64_t imm64)  // load a 64-bit immediate into dst
 493   {
 494     mov_immediate64(dst, imm64);
 495   }
 496 
 497   inline void movw(Register dst, uint32_t imm32)  // load a 32-bit immediate into dst
 498   {
 499     mov_immediate32(dst, imm32);
 500   }
 501 
 502   inline void mov(Register dst, int64_t l)




 460   void mov_immediate32(Register dst, uint32_t imm32);
 461 
 462   int push(unsigned int bitset, Register stack);
 463   int pop(unsigned int bitset, Register stack);
 464 
 465   int push_fp(unsigned int bitset, Register stack);
 466   int pop_fp(unsigned int bitset, Register stack);
 467 
 468   void mov(Register dst, Address a);
 469 
 470 public:
 471   void push(RegSet regs, Register stack) { if (regs.bits()) push(regs.bits(), stack); }  // delegates to the bitset overload; no-op for an empty RegSet
 472   void pop(RegSet regs, Register stack) { if (regs.bits()) pop(regs.bits(), stack); }  // delegates to the bitset overload; no-op for an empty RegSet
 473 
 474   void push_fp(RegSet regs, Register stack) { if (regs.bits()) push_fp(regs.bits(), stack); }  // FP-register variant; no-op for an empty RegSet
 475   void pop_fp(RegSet regs, Register stack) { if (regs.bits()) pop_fp(regs.bits(), stack); }  // FP-register variant; no-op for an empty RegSet
 476 
 477   // Push and pop everything that might be clobbered by a native
 478   // runtime call except rscratch1 and rscratch2.  (They are always
 479   // scratch, so we don't have to protect them.)  Only save the lower
 480   // 64 bits of each vector register. Additional registers can be excluded
 481   // in a passed RegSet.
 482   void push_call_clobbered_registers_except(RegSet exclude);
 483   void pop_call_clobbered_registers_except(RegSet exclude);
 484 
 485   void push_call_clobbered_registers() {  // convenience wrapper: empty exclusion set, i.e. save everything
 486     push_call_clobbered_registers_except(RegSet());
 487   }
 488   void pop_call_clobbered_registers() {  // convenience wrapper: empty exclusion set, i.e. restore everything
 489     pop_call_clobbered_registers_except(RegSet());
 490   }
 491 
 492 
 493   // now mov instructions for loading absolute addresses and 32 or
 494   // 64 bit integers
 495 
 496   inline void mov(Register dst, address addr)  // materialize an absolute address as a 64-bit immediate
 497   {
 498     mov_immediate64(dst, (uint64_t)addr);
 499   }
 500 
 501   inline void mov(Register dst, uint64_t imm64)  // load a 64-bit immediate into dst
 502   {
 503     mov_immediate64(dst, imm64);
 504   }
 505 
 506   inline void movw(Register dst, uint32_t imm32)  // load a 32-bit immediate into dst
 507   {
 508     mov_immediate32(dst, imm32);
 509   }
 510 
 511   inline void mov(Register dst, int64_t l)


< prev index next >