< prev index next >

src/cpu/aarch64/vm/macroAssembler_aarch64.cpp

Print this page
rev 9032 : 8139041: Redundant DMB instructions
Summary: merge consecutive DMB instructions
Reviewed-by: kvn


1692   // input : ra: dividend
1693   //         rb: divisor
1694   //
1695   // result: either
1696   //         quotient  (= ra idiv rb)
1697   //         remainder (= ra irem rb)
1698 
1699   assert(ra != scratch && rb != scratch, "reg cannot be scratch");
1700 
1701   int idivq_offset = offset();
1702   if (! want_remainder) {
1703     sdiv(result, ra, rb);
1704   } else {
1705     sdiv(scratch, ra, rb);
1706     Assembler::msub(result, scratch, rb, ra);
1707   }
1708 
1709   return idivq_offset;
1710 }
1711 














1712 // MacroAssembler routines found actually to be needed
1713 
1714 void MacroAssembler::push(Register src)
1715 {
1716   str(src, Address(pre(esp, -1 * wordSize)));
1717 }
1718 
1719 void MacroAssembler::pop(Register dst)
1720 {
1721   ldr(dst, Address(post(esp, 1 * wordSize)));
1722 }
1723 
1724 // Note: load_unsigned_short used to be called load_unsigned_word.
1725 int MacroAssembler::load_unsigned_short(Register dst, Address src) {
1726   int off = offset();
1727   ldrh(dst, src);
1728   return off;
1729 }
1730 
1731 int MacroAssembler::load_unsigned_byte(Register dst, Address src) {




1692   // input : ra: dividend
1693   //         rb: divisor
1694   //
1695   // result: either
1696   //         quotient  (= ra idiv rb)
1697   //         remainder (= ra irem rb)
1698 
1699   assert(ra != scratch && rb != scratch, "reg cannot be scratch");
1700 
1701   int idivq_offset = offset();
1702   if (! want_remainder) {
1703     sdiv(result, ra, rb);
1704   } else {
1705     sdiv(scratch, ra, rb);
1706     Assembler::msub(result, scratch, rb, ra);
1707   }
1708 
1709   return idivq_offset;
1710 }
1711 
// Emit a memory barrier with the requested ordering constraint.
// If the immediately preceding emitted instruction was a membar
// produced by this routine, no new DMB is emitted; instead the
// previous barrier's kind is widened to also cover this constraint
// (eliminating redundant back-to-back DMBs).
void MacroAssembler::membar(Membar_mask_bits order_constraint) {
  // Start address a preceding DMB would have, if the last emitted
  // instruction was one.
  address prev = pc() - NativeMembar::instruction_size;
  if (prev == code()->last_membar()) {
    // NOTE(review): this merge assumes nothing can branch to pc()
    // between the two barriers — confirm last_membar is invalidated
    // wherever a label is bound or the buffer is otherwise disturbed.
    NativeMembar *bar = NativeMembar_at(prev);
    // We are merging two memory barrier instructions.  On AArch64 we
    // can do this simply by ORing them together.
    bar->set_kind(bar->get_kind() | order_constraint);
    BLOCK_COMMENT("merged membar");
  } else {
    // Record where this barrier starts so that an immediately
    // following membar() call can merge into it.
    code()->set_last_membar(pc());
    dmb(Assembler::barrier(order_constraint));
  }
}
1725 
1726 // MacroAssembler routines found actually to be needed
1727 
1728 void MacroAssembler::push(Register src)
1729 {
1730   str(src, Address(pre(esp, -1 * wordSize)));
1731 }
1732 
1733 void MacroAssembler::pop(Register dst)
1734 {
1735   ldr(dst, Address(post(esp, 1 * wordSize)));
1736 }
1737 
1738 // Note: load_unsigned_short used to be called load_unsigned_word.
1739 int MacroAssembler::load_unsigned_short(Register dst, Address src) {
1740   int off = offset();
1741   ldrh(dst, src);
1742   return off;
1743 }
1744 
1745 int MacroAssembler::load_unsigned_byte(Register dst, Address src) {


< prev index next >