< prev index next >

src/cpu/aarch32/vm/macroAssembler_aarch32.cpp

Print this page
rev 8069 : 8164652: aarch32: C1 port


 142     return 4 * NativeInstruction::arm_insn_sz;
 143   } else {
 144     ShouldNotReachHere();
 145   }
 146   return 0; //won't reach here
 147 }
 148 
 // Reconstruct the absolute target address encoded by the (possibly
 // multi-instruction) patchable sequence starting at insn_addr.
 // Handles B/BL, movw/movt pairs, mov+3*orr sequences, PC-relative
 // LDR/STR forms, VLDR/VSTR and ADR.  For PC-relative forms the result
 // is insn_addr + decoded offset, corrected for the ARM pipeline
 // (reading PC yields insn_addr + 8).
 149 address MacroAssembler::target_addr_for_insn(address insn_addr, unsigned insn) {
 150   long offset = 0;
 151   int opc = Instruction_aarch32::extract(insn, 27, 24);
 152 
 153   if(0b1010 == opc || 0b1011 == opc) {
 154     // Branch or branch with link
 155     offset = Instruction_aarch32::sextract(insn, 23, 0) * 4;
 156   } else if (0b0011 == opc) {
 157     unsigned *insn_buf = (unsigned*)insn_addr;
 158     int opc2 = Instruction_aarch32::extract(insn, 23, 21);
 159     if(0b000 == opc2) {
 160       // movw, movt (only on newer ARMs)
 161       assert(nativeInstruction_at(&insn_buf[1])->is_movt(), "wrong insns in patch");
 162       u_int32_t addr;
       // insn_buf[1] is the movt: its imm4:imm12 supplies the high halfword
 163       addr  = Instruction_aarch32::extract(insn_buf[1], 19, 16) << 28;
 164       addr |= Instruction_aarch32::extract(insn_buf[1], 11, 0) << 16;
       // insn_buf[0] is the movw: low halfword
 165       addr |= Instruction_aarch32::extract(insn_buf[0], 19, 16) << 12;
 166       addr |= Instruction_aarch32::extract(insn_buf[0], 11, 0);
 167       return address(addr);
 168     } else if(0b101 == opc2) {
 169       // mov, orr, orr, orr
 170       assert(nativeInstruction_at(&insn_buf[1])->is_orr(), "wrong insns in patch");
 171       assert(nativeInstruction_at(&insn_buf[2])->is_orr(), "wrong insns in patch");
 172       assert(nativeInstruction_at(&insn_buf[3])->is_orr(), "wrong insns in patch");
 173       u_int32_t addr;
       // each instruction contributes a disjoint rotated-imm8 chunk;
       // OR-ing the four decoded imm12 fields rebuilds the full word
 174       addr  = Assembler::decode_imm12(Instruction_aarch32::extract(insn_buf[0], 11, 0));
 175       addr |= Assembler::decode_imm12(Instruction_aarch32::extract(insn_buf[1], 11, 0));
 176       addr |= Assembler::decode_imm12(Instruction_aarch32::extract(insn_buf[2], 11, 0));
 177       addr |= Assembler::decode_imm12(Instruction_aarch32::extract(insn_buf[3], 11, 0));
 178       return address(addr);
 179     } else {
 180       ShouldNotReachHere();
 181     }
 182   } else if (0b010 == (opc >> 1)) {
 183     // LDR, LDRB, STR, STRB
 184     offset = Instruction_aarch32::extract(insn, 11, 0);
     // bit 23 is the U (add/subtract offset) bit
 185     bool add = Instruction_aarch32::extract(insn, 23, 23);
 186     offset = add ? offset : -offset;
 187   } else if (0b000 == (opc >> 1)) {
 188     // LDRH, LDRSH, LDRSB, LDRD, STRH, STRD
     // offset is split imm4:imm4 (bits 3:0 low, 11:8 high)
 189     offset = Instruction_aarch32::extract(insn, 3, 0);
 190     offset |= Instruction_aarch32::extract(insn, 11, 8) << 4;
 191     bool add = Instruction_aarch32::extract(insn, 23, 23);
 192     offset = add ? offset : -offset;
 193   } else if (0b1101 == opc) {
 194     // VLDR, VSTR - NOTE VSTR(lit) is deprecated
     // imm8 scaled by 4 (word offset)
 195     offset = Instruction_aarch32::extract(insn, 7, 0) << 2;
 196     bool add = Instruction_aarch32::extract(insn, 23, 23);
 197     offset = add ? offset : -offset;
 198   } else if (0b0010 == opc) {
 199     // ADR
 200     offset = decode_imm12(Instruction_aarch32::extract(insn, 11, 0));
 201     int code = Instruction_aarch32::extract(insn, 23, 22);
 202     switch(code) {
 203       case 0b01: offset = -offset; break;
 204       case 0b10:                   break;
 205       default: ShouldNotReachHere();
 206     }
 207   } else {
 208     ShouldNotReachHere();
 209   }
 210   //Correct offset for PC
 211   offset -= 8;
 212   return address(((u_int32_t)insn_addr + offset));
 213 }
 214 
 215 
 // Memory serialization point for thread-state transitions: emit a
 // full-system data synchronization barrier.  thread and tmp are
 // unused in this aarch32 implementation.
 216 void MacroAssembler::serialize_memory(Register thread, Register tmp) {
 217   dsb(Assembler::SY);
 218 }
 219 
 220 
 // Clear the last-Java-frame anchor in the current thread.
 // last_Java_sp is always zeroed; fp and pc are cleared on request.
 221 void MacroAssembler::reset_last_Java_frame(bool clear_fp,
 222                                            bool clear_pc) {
 223   mov(rscratch1, 0);
 224   // we must set sp to zero to clear frame
 225   str(rscratch1, Address(rthread, JavaThread::last_Java_sp_offset()));
 226   // must clear fp, so that compiled frames are not confused; it is
 227   // possible that we need it only for debugging
 228   if (clear_fp) {
 229     str(rscratch1, Address(rthread, JavaThread::last_Java_fp_offset()));
 230   }
 231 
 232   if (clear_pc) {
 233     str(rscratch1, Address(rthread, JavaThread::last_Java_pc_offset()));
 234   }
 235 }
 236 
 237 // Calls to C land


 287 }
 288 
 // Record the last Java frame, taking the continuation pc from a Label.
 // If the label is not yet bound, emit the sequence with a NULL pc and
 // register a patch site so the real address is filled in at bind time.
 289 void MacroAssembler::set_last_Java_frame(Register last_java_sp,
 290                                          Register last_java_fp,
 291                                          Label &L,
 292                                          Register scratch) {
 293   if (L.is_bound()) {
 294     set_last_Java_frame(last_java_sp, last_java_fp, target(L), scratch);
 295   } else {
 296     InstructionMark im(this);
 297     L.add_patch_at(code(), locator());
 298     set_last_Java_frame(last_java_sp, last_java_fp, (address)NULL, scratch);
 299   }
 300 }
 301 
 // Call a target anywhere in the code cache.  When the site carries a
 // relocation or the code cache exceeds direct-branch range, the target
 // is materialized in tmp and called via BL register; otherwise a plain
 // BL is emitted.  cbuf, if given, gets its insts mark set at the call.
 302 void MacroAssembler::far_call(Address entry, CodeBuffer *cbuf, Register tmp) {
 303   assert(CodeCache::find_blob(entry.target()) != NULL,
 304          "destination of far call not found in code cache");
 305   // TODO performance issue: if intended to patch later,
 306   // generate mov rX, imm; bl rX far call (to reserve space)
 307   if (entry.rspec().type() != relocInfo::none || far_branches()) {
 308     lea(tmp, entry);
 309     if (cbuf) cbuf->set_insts_mark();
 310     bl(tmp);
 311   } else {
 312     if (cbuf) cbuf->set_insts_mark();
 313     bl(entry);
 314   }
 315 }
 316 
 // Jump to a target anywhere in the code cache; same strategy as
 // far_call but ends with B instead of BL (no return address).
 317 void MacroAssembler::far_jump(Address entry, CodeBuffer *cbuf, Register tmp) {
 318   assert(CodeCache::find_blob(entry.target()) != NULL,
 319          "destination of far call not found in code cache");
 320   assert(!external_word_Relocation::is_reloc_index((intptr_t)entry.target()), "can't far jump to reloc index)");
 321   // TODO performance issue: if intended to patch later,
 322   // generate mov rX, imm; bl rX far call (to reserve space)
 323   if (entry.rspec().type() != relocInfo::none || far_branches()) {
 324     lea(tmp, entry);
 325     if (cbuf) cbuf->set_insts_mark();
 326     b(tmp);
 327   } else {
 328     if (cbuf) cbuf->set_insts_mark();
 329     b(entry);
 330   }
 331 }
 332 
 333 int MacroAssembler::biased_locking_enter(Register lock_reg,
 334                                          Register obj_reg,
 335                                          Register swap_reg,
 336                                          Register tmp_reg,
 337                                          bool swap_reg_contains_mark,
 338                                          Label& done,
 339                                          Label* slow_case,
 340                                          BiasedLockingCounters* counters) {
 341   assert(UseBiasedLocking, "why call this otherwise?");
 342   assert_different_registers(lock_reg, obj_reg, swap_reg);
 343 


 574   //str(lr, pre(sp, -wordSize));
 575 
 576   // do the call, remove parameters
 577   MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments, &l);
 578 
 579   //ldr(lr, post(sp, wordSize));
 580 
 581   // reset last Java frame
 582   // Only interpreter should have to clear fp
 583   reset_last_Java_frame(true, true);
 584 
 585    // C++ interp handles this in the interpreter
 586   check_and_handle_popframe(java_thread);
 587   check_and_handle_earlyret(java_thread);
 588 
 589   if (check_exceptions) {
 590     // check for pending exceptions (java_thread is set upon return)
 591     ldr(rscratch2, Address(java_thread, in_bytes(Thread::pending_exception_offset())));
 592     Label ok;
 593     cbz(rscratch2, ok);

 594     lea(rscratch2, RuntimeAddress(StubRoutines::forward_exception_entry()));


 595     bl(rscratch2);
 596     bind(ok);
 597   }
 598 
 599   // get oop result if there is one and reset the value in the thread
 600   if (oop_result->is_valid()) {
 601     get_vm_result(oop_result, java_thread);
 602   }
 603 }
 604 
 // Thin wrapper: call_VM_base with no explicit java_thread/last_java_sp
 // registers (noreg lets call_VM_base pick the defaults).
 605 void MacroAssembler::call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {
 606   call_VM_base(oop_result, noreg, noreg, entry_point, number_of_arguments, check_exceptions);
 607 }
 608 
 609 // Maybe emit a call via a trampoline.  If the code cache is small
 610 // trampolines won't be emitted.
 611 
 // Emit a patchable call for runtime/virtual/static call sites.  The
 // destination is stored as a raw 4-byte word in the instruction
 // stream, so later patching of the target is one atomic word store.
 612 void MacroAssembler::trampoline_call(Address entry, CodeBuffer *cbuf) {
 613   assert(entry.rspec().type() == relocInfo::runtime_call_type
 614          || entry.rspec().type() == relocInfo::opt_virtual_call_type
 615          || entry.rspec().type() == relocInfo::static_call_type
 616          || entry.rspec().type() == relocInfo::virtual_call_type, "wrong reloc type");
 617 
 618   //FIXME This block
   // NOTE(review): compile_in_scratch_emit_size is computed but never
   // used below — confirm whether it was meant to guard the emission.
 619   bool compile_in_scratch_emit_size = false;
 620   #ifdef COMPILER2
 621   compile_in_scratch_emit_size = Compile::current()->in_scratch_emit_size();
 622   #endif
 623 
 624   if (cbuf) cbuf->set_insts_mark();
 625   relocate(entry.rspec());

 627   // Build the trampoline such that the destination address is a raw
 628   // 4-byte value, so it can be patched atomically.

   // lr := address just past the embedded target word (the return pc)
 629   add(lr, r15_pc, NativeCall::instruction_size - 2 * NativeInstruction::arm_insn_sz);
 630   ldr(r15_pc, Address(r15_pc, 4)); // Address does correction for offset from pc base
 631   emit_int32((uintptr_t) entry.target());
 632   // possibly pad the call to the NativeCall size to make patching happy
 633   for (int i = NativeCall::instruction_size; i > 3 * NativeInstruction::arm_insn_sz; i -= NativeInstruction::arm_insn_sz)
 634     nop();



 635 }
 636 
 // Inline-cache call: load the IC sentinel (Universe::non_oop_word)
 // into rscratch2 and emit a patchable virtual call to entry.
 637 void MacroAssembler::ic_call(address entry) {
 638   RelocationHolder rh = virtual_call_Relocation::spec(pc());
 639   // address const_ptr = long_constant((jlong)Universe::non_oop_word());
 640   // unsigned long offset;
 641   // ldr_constant(rscratch2, const_ptr);
 642   movptr(rscratch2, (uintptr_t)Universe::non_oop_word());
 643   trampoline_call(Address(entry, rh));
 644 }
 645 
 646 // Implementation of call_VM versions
 647 
 // call_VM with zero arguments.
 648 void MacroAssembler::call_VM(Register oop_result,
 649                              address entry_point,
 650                              bool check_exceptions) {
 651   call_VM_helper(oop_result, entry_point, 0, check_exceptions);
 652 }
 653 
 654 void MacroAssembler::call_VM(Register oop_result,


1724   _masm = masm;
1725   _masm->mov(rscratch1, ExternalAddress((address)flag_addr));
1726   _masm->ldrb(rscratch1, rscratch1);
1727   _masm->cmp(rscratch1, 0);
1728   _masm->b(_label, value ? Assembler::NE : Assembler::EQ);
1729 }
1730 
 // Close the conditionally-skipped region opened by the constructor.
1731 SkipIfEqual::~SkipIfEqual() {
1732   _masm->bind(_label);
1733 }
1734 
 // Compare src1 against the word stored at absolute address src2.
 // Clobbers rscratch1.
1735 void MacroAssembler::cmpptr(Register src1, Address src2) {
1736   mov(rscratch1, src2);
1737   ldr(rscratch1, Address(rscratch1));
1738   cmp(src1, rscratch1);
1739 }
1740 
 // Card-table write barrier for the oop held in obj.
1741 void MacroAssembler::store_check(Register obj) {
1742   // Does a store check for the oop in register obj. The content of
1743   // register obj is destroyed afterwards.
1744   store_check_part_1(obj);
1745   store_check_part_2(obj);
1746 }
1747 
 // dst is ignored: the card to dirty is derived from obj itself.
1748 void MacroAssembler::store_check(Register obj, Address dst) {
1749   store_check(obj);
1750 }
1751 
1752 
1753 // split the store check operation so that other instructions can be scheduled in between
1754 void MacroAssembler::store_check_part_1(Register obj) {
1755   BarrierSet* bs = Universe::heap()->barrier_set();
1756   assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
   // turn the oop address into its card index (destroys obj)
1757   lsr(obj, obj, CardTableModRefBS::card_shift);
1758 }
1759 
 // Second half of the store check: dirty the card whose index
 // store_check_part_1 left in obj.  Clobbers rscratch1 and rscratch2.
1760 void MacroAssembler::store_check_part_2(Register obj) {
1761   BarrierSet* bs = Universe::heap()->barrier_set();
1762   assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
1763   CardTableModRefBS* ct = (CardTableModRefBS*)bs;
1764   assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
1765 
1766   // The calculation for byte_map_base is as follows:
1767   // byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
1768   // So this essentially converts an address to a displacement and
1769   // it will never need to be relocated.
1770 
1771   // FIXME: It's not likely that disp will fit into an offset so we
1772   // don't bother to check, but it could save an instruction.
1773   intptr_t disp = (intptr_t) ct->byte_map_base;
1774   mov(rscratch1, disp);
1775   mov(rscratch2, 0);
   // store dirty (0) at byte_map_base + card_index
1776   strb(rscratch2, Address(obj, rscratch1));












1777 }
1778 
 // Load the klass pointer of the oop in src into dst.
1779 void MacroAssembler::load_klass(Register dst, Register src) {
1780   ldr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
1781 }
1782 
 // Compare trial_klass against oop's klass; tmp receives the loaded klass.
1783 void MacroAssembler::cmp_klass(Register oop, Register trial_klass, Register tmp) {
1784   ldr(tmp, Address(oop, oopDesc::klass_offset_in_bytes()));
1785   cmp(trial_klass, tmp);
1786 }
1787 
 // Load the prototype mark word from src's klass (used by biased locking).
1788 void MacroAssembler::load_prototype_header(Register dst, Register src) {
1789   load_klass(dst, src);
1790   ldr(dst, Address(dst, Klass::prototype_header_offset()));
1791 }
1792 
 // Store klass pointer src into the oop header at dst.
1793 void MacroAssembler::store_klass(Register dst, Register src) {
1794   str(src, Address(dst, oopDesc::klass_offset_in_bytes()));
1795 }
1796 




 142     return 4 * NativeInstruction::arm_insn_sz;
 143   } else {
 144     ShouldNotReachHere();
 145   }
 146   return 0; //won't reach here
 147 }
 148 
 // Reconstruct the absolute target address encoded by the (possibly
 // multi-instruction) patchable sequence starting at insn_addr.
 // Handles B/BL, movw/movt pairs, mov+3*orr sequences, PC-relative
 // LDR/STR forms, VLDR/VSTR and ADR.  For PC-relative forms the result
 // is insn_addr + decoded offset, corrected for the ARM pipeline
 // (reading PC yields insn_addr + 8).
 149 address MacroAssembler::target_addr_for_insn(address insn_addr, unsigned insn) {
 150   long offset = 0;
 151   int opc = Instruction_aarch32::extract(insn, 27, 24);
 152 
 153   if(0b1010 == opc || 0b1011 == opc) {
 154     // Branch or branch with link
 155     offset = Instruction_aarch32::sextract(insn, 23, 0) * 4;
 156   } else if (0b0011 == opc) {
 157     unsigned *insn_buf = (unsigned*)insn_addr;
 158     int opc2 = Instruction_aarch32::extract(insn, 23, 21);
 159     if(0b000 == opc2) {
 160       // movw, movt (only on newer ARMs)
 161       assert(nativeInstruction_at(&insn_buf[1])->is_movt(), "wrong insns in patch");
 162       uint32_t addr;
       // insn_buf[1] is the movt: its imm4:imm12 supplies the high halfword
 163       addr  = Instruction_aarch32::extract(insn_buf[1], 19, 16) << 28;
 164       addr |= Instruction_aarch32::extract(insn_buf[1], 11, 0) << 16;
       // insn_buf[0] is the movw: low halfword
 165       addr |= Instruction_aarch32::extract(insn_buf[0], 19, 16) << 12;
 166       addr |= Instruction_aarch32::extract(insn_buf[0], 11, 0);
 167       return address(addr);
 168     } else if(0b101 == opc2) {
 169       // mov, orr, orr, orr
 170       assert(nativeInstruction_at(&insn_buf[1])->is_orr(), "wrong insns in patch");
 171       assert(nativeInstruction_at(&insn_buf[2])->is_orr(), "wrong insns in patch");
 172       assert(nativeInstruction_at(&insn_buf[3])->is_orr(), "wrong insns in patch");
 173       uint32_t addr;
       // each instruction contributes a disjoint rotated-imm8 chunk;
       // OR-ing the four decoded imm12 fields rebuilds the full word
 174       addr  = Assembler::decode_imm12(Instruction_aarch32::extract(insn_buf[0], 11, 0));
 175       addr |= Assembler::decode_imm12(Instruction_aarch32::extract(insn_buf[1], 11, 0));
 176       addr |= Assembler::decode_imm12(Instruction_aarch32::extract(insn_buf[2], 11, 0));
 177       addr |= Assembler::decode_imm12(Instruction_aarch32::extract(insn_buf[3], 11, 0));
 178       return address(addr);
 179     } else {
 180       ShouldNotReachHere();
 181     }
 182   } else if (0b010 == (opc >> 1)) {
 183     // LDR, LDRB, STR, STRB
 184     offset = Instruction_aarch32::extract(insn, 11, 0);
     // bit 23 is the U (add/subtract offset) bit
 185     bool add = Instruction_aarch32::extract(insn, 23, 23);
 186     offset = add ? offset : -offset;
 187   } else if (0b000 == (opc >> 1)) {
 188     // LDRH, LDRSH, LDRSB, LDRD, STRH, STRD
     // offset is split imm4:imm4 (bits 3:0 low, 11:8 high)
 189     offset = Instruction_aarch32::extract(insn, 3, 0);
 190     offset |= Instruction_aarch32::extract(insn, 11, 8) << 4;
 191     bool add = Instruction_aarch32::extract(insn, 23, 23);
 192     offset = add ? offset : -offset;
 193   } else if (0b1101 == opc) {
 194     // VLDR, VSTR - NOTE VSTR(lit) is deprecated
     // imm8 scaled by 4 (word offset)
 195     offset = Instruction_aarch32::extract(insn, 7, 0) << 2;
 196     bool add = Instruction_aarch32::extract(insn, 23, 23);
 197     offset = add ? offset : -offset;
 198   } else if (0b0010 == opc) {
 199     // ADR
 200     offset = decode_imm12(Instruction_aarch32::extract(insn, 11, 0));
 201     int code = Instruction_aarch32::extract(insn, 23, 22);
 202     switch(code) {
 203       case 0b01: offset = -offset; break;
 204       case 0b10:                   break;
 205       default: ShouldNotReachHere();
 206     }
 207   } else {
 208     ShouldNotReachHere();
 209   }
 210   //Correct offset for PC
 211   offset -= 8;
 212   return address(((uint32_t)insn_addr + offset));
 213 }
 214 
 215 
 // Memory serialization point for thread-state transitions.  An
 // inner-shareable data memory barrier is sufficient here; a
 // full-system DSB SY is not required.  thread and tmp are unused.
 216 void MacroAssembler::serialize_memory(Register thread, Register tmp) {
 217   dmb(Assembler::ISH);
 218 }
 219 
 220 
 // Clear the last-Java-frame anchor in the current thread.
 // last_Java_sp is always zeroed; fp and pc are cleared on request.
 221 void MacroAssembler::reset_last_Java_frame(bool clear_fp,
 222                                            bool clear_pc) {
 223   mov(rscratch1, 0);
 224   // we must set sp to zero to clear frame
 225   str(rscratch1, Address(rthread, JavaThread::last_Java_sp_offset()));
 226   // must clear fp, so that compiled frames are not confused; it is
 227   // possible that we need it only for debugging
 228   if (clear_fp) {
 229     str(rscratch1, Address(rthread, JavaThread::last_Java_fp_offset()));
 230   }
 231 
 232   if (clear_pc) {
 233     str(rscratch1, Address(rthread, JavaThread::last_Java_pc_offset()));
 234   }
 235 }
 236 
 237 // Calls to C land


 287 }
 288 
 // Record the last Java frame, taking the continuation pc from a Label.
 // If the label is not yet bound, emit the sequence with a NULL pc and
 // register a patch site so the real address is filled in at bind time.
 289 void MacroAssembler::set_last_Java_frame(Register last_java_sp,
 290                                          Register last_java_fp,
 291                                          Label &L,
 292                                          Register scratch) {
 293   if (L.is_bound()) {
 294     set_last_Java_frame(last_java_sp, last_java_fp, target(L), scratch);
 295   } else {
 296     InstructionMark im(this);
 297     L.add_patch_at(code(), locator());
 298     set_last_Java_frame(last_java_sp, last_java_fp, (address)NULL, scratch);
 299   }
 300 }
 301 
 // Call a target anywhere in the code cache.  When far_branches() the
 // target is materialized in tmp and called via BL register; otherwise
 // a plain BL.  cbuf, if given, gets its insts mark set at the call.
 302 void MacroAssembler::far_call(Address entry, CodeBuffer *cbuf, Register tmp) {
 303   assert(CodeCache::find_blob(entry.target()) != NULL,
 304          "destination of far call not found in code cache");
 305   // TODO performance issue: if intended to patch later,
 306   // generate mov rX, imm; bl rX far call (to reserve space)
 307   if (far_branches()) {
 308     lea(tmp, entry);
 309     if (cbuf) cbuf->set_insts_mark();
 310     bl(tmp);
 311   } else {
 312     if (cbuf) cbuf->set_insts_mark();
 313     bl(entry);
 314   }
 315 }
 316 
 // Jump to a target anywhere in the code cache; same strategy as
 // far_call but ends with B instead of BL (no return address).
 317 void MacroAssembler::far_jump(Address entry, CodeBuffer *cbuf, Register tmp) {
 318   assert(CodeCache::find_blob(entry.target()) != NULL,
 319          "destination of far call not found in code cache");
 320   assert(!external_word_Relocation::is_reloc_index((intptr_t)entry.target()), "can't far jump to reloc index)");
 321   if (far_branches()) {


 322     lea(tmp, entry);
 323     if (cbuf) cbuf->set_insts_mark();
 324     b(tmp);
 325   } else {
 326     if (cbuf) cbuf->set_insts_mark();
 327     b(entry);
 328   }
 329 }
 330 
 331 int MacroAssembler::biased_locking_enter(Register lock_reg,
 332                                          Register obj_reg,
 333                                          Register swap_reg,
 334                                          Register tmp_reg,
 335                                          bool swap_reg_contains_mark,
 336                                          Label& done,
 337                                          Label* slow_case,
 338                                          BiasedLockingCounters* counters) {
 339   assert(UseBiasedLocking, "why call this otherwise?");
 340   assert_different_registers(lock_reg, obj_reg, swap_reg);
 341 


 572   //str(lr, pre(sp, -wordSize));
 573 
 574   // do the call, remove parameters
 575   MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments, &l);
 576 
 577   //ldr(lr, post(sp, wordSize));
 578 
 579   // reset last Java frame
 580   // Only interpreter should have to clear fp
 581   reset_last_Java_frame(true, true);
 582 
 583    // C++ interp handles this in the interpreter
 584   check_and_handle_popframe(java_thread);
 585   check_and_handle_earlyret(java_thread);
 586 
 587   if (check_exceptions) {
 588     // check for pending exceptions (java_thread is set upon return)
 589     ldr(rscratch2, Address(java_thread, in_bytes(Thread::pending_exception_offset())));
 590     Label ok;
 591     cbz(rscratch2, ok);
 592 
 593     lea(rscratch2, RuntimeAddress(StubRoutines::forward_exception_entry()));
 594     // forward_exception uses LR to choose exception handler but LR is trashed by previous code
 595     // since we used to get here from interpreted code BL is acceptable way to acquire correct LR (see StubGenerator::generate_forward_exception)
 596     bl(rscratch2);
 597     bind(ok);
 598   }
 599 
 600   // get oop result if there is one and reset the value in the thread
 601   if (oop_result->is_valid()) {
 602     get_vm_result(oop_result, java_thread);
 603   }
 604 }
 605 
 // Thin wrapper: call_VM_base with no explicit java_thread/last_java_sp
 // registers (noreg lets call_VM_base pick the defaults).
 606 void MacroAssembler::call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {
 607   call_VM_base(oop_result, noreg, noreg, entry_point, number_of_arguments, check_exceptions);
 608 }
 609 
 610 // Maybe emit a call via a trampoline.  If the code cache is small
 611 // trampolines won't be emitted.
 612 
 // Emit a patchable call for runtime/virtual/static call sites.  When
 // far_branches(), the destination is stored as a raw 4-byte word in
 // the instruction stream so later patching is one atomic word store;
 // otherwise a plain BL suffices.
 613 void MacroAssembler::trampoline_call(Address entry, CodeBuffer *cbuf) {
 614   assert(entry.rspec().type() == relocInfo::runtime_call_type
 615          || entry.rspec().type() == relocInfo::opt_virtual_call_type
 616          || entry.rspec().type() == relocInfo::static_call_type
 617          || entry.rspec().type() == relocInfo::virtual_call_type, "wrong reloc type");
 618 
 619   if (cbuf) {
 620     cbuf->set_insts_mark();
 621   }




 622 
 623   if (far_branches()) {
 624     // Build the trampoline such that the destination address is a raw
 625     // 4-byte value, so it can be patched atomically.
 626     relocate(entry.rspec());
     // lr := address just past the embedded target word (the return pc)
 627     add(lr, r15_pc, NativeCall::instruction_size - 2 * NativeInstruction::arm_insn_sz);
 628     ldr(r15_pc, Address(r15_pc, 4));
 629     emit_int32((uintptr_t) entry.target());
 630     // possibly pad the call to the NativeCall size to make patching happy
 631     for (int i = NativeCall::instruction_size; i > 3 * NativeInstruction::arm_insn_sz; i -= NativeInstruction::arm_insn_sz)
 632       nop();
 633   } else {
 634     bl(entry);
 635   }
 636 }
 637 
 // Inline-cache call: load the IC sentinel (Universe::non_oop_word)
 // into rscratch2 and emit a patchable virtual call to entry.
 638 void MacroAssembler::ic_call(address entry) {
 639   RelocationHolder rh = virtual_call_Relocation::spec(pc());
 640   // address const_ptr = long_constant((jlong)Universe::non_oop_word());
 641   // unsigned long offset;
 642   // ldr_constant(rscratch2, const_ptr);
 643   movptr(rscratch2, (uintptr_t)Universe::non_oop_word());
 644   trampoline_call(Address(entry, rh));
 645 }
 646 
 647 // Implementation of call_VM versions
 648 
 // call_VM with zero arguments.
 649 void MacroAssembler::call_VM(Register oop_result,
 650                              address entry_point,
 651                              bool check_exceptions) {
 652   call_VM_helper(oop_result, entry_point, 0, check_exceptions);
 653 }
 654 
 655 void MacroAssembler::call_VM(Register oop_result,


1725   _masm = masm;
1726   _masm->mov(rscratch1, ExternalAddress((address)flag_addr));
1727   _masm->ldrb(rscratch1, rscratch1);
1728   _masm->cmp(rscratch1, 0);
1729   _masm->b(_label, value ? Assembler::NE : Assembler::EQ);
1730 }
1731 
 // Close the conditionally-skipped region opened by the constructor.
1732 SkipIfEqual::~SkipIfEqual() {
1733   _masm->bind(_label);
1734 }
1735 
 // Compare src1 against the word stored at absolute address src2.
 // Clobbers rscratch1.
1736 void MacroAssembler::cmpptr(Register src1, Address src2) {
1737   mov(rscratch1, src2);
1738   ldr(rscratch1, Address(rscratch1));
1739   cmp(src1, rscratch1);
1740 }
1741 
 // Card-table write barrier: dirty the card covering the oop in obj.
1742 void MacroAssembler::store_check(Register obj) {
1743   // Does a store check for the oop in register obj. Unlike the split
1744   // part_1/part_2 form, obj is shifted in the addressing mode and is preserved.















 
 
1746   BarrierSet* bs = Universe::heap()->barrier_set();
1747   assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
1748   CardTableModRefBS* ct = (CardTableModRefBS*)bs;
1749   assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
1750 
1751   // The calculation for byte_map_base is as follows:
1752   // byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
1753   // So this essentially converts an address to a displacement and
1754   // it will never need to be relocated.
1755 
1756   // FIXME: It's not likely that disp will fit into an offset so we
1757   // don't bother to check, but it could save an instruction.
1758   intptr_t disp = (intptr_t) ct->byte_map_base;
1759   mov(rscratch1, disp);
1760   assert((disp & 0xff) == 0, "fix store char 0 below");
   // store the low byte of rscratch1 (zero, per the assert above) at
   // byte_map_base + (obj >> card_shift) — obj itself is not modified
1761   strb(rscratch1, Address(rscratch1, obj, lsr((int) CardTableModRefBS::card_shift)));
1762 }
1763 
 // dst is ignored: the card to dirty is derived from obj itself.
1764 void MacroAssembler::store_check(Register obj, Address dst) {
1765   store_check(obj);
1766 }
1767 
1768 // split the store check operation so that other instructions can be scheduled in between
 // The split form is retired: store_check now emits the whole barrier
 // in one piece, so these entry points must never be reached.
1769 void MacroAssembler::store_check_part_1(Register obj) {
1770   ShouldNotCallThis();
1771 }
1772 
1773 void MacroAssembler::store_check_part_2(Register obj) {
1774   ShouldNotCallThis();
1775 }
1776 
 // Load the klass pointer of the oop in src into dst.
1777 void MacroAssembler::load_klass(Register dst, Register src) {
1778   ldr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
1779 }
1780 
 // Compare trial_klass against oop's klass; tmp receives the loaded klass.
1781 void MacroAssembler::cmp_klass(Register oop, Register trial_klass, Register tmp) {
1782   ldr(tmp, Address(oop, oopDesc::klass_offset_in_bytes()));
1783   cmp(trial_klass, tmp);
1784 }
1785 
 // Load the prototype mark word from src's klass (used by biased locking).
1786 void MacroAssembler::load_prototype_header(Register dst, Register src) {
1787   load_klass(dst, src);
1788   ldr(dst, Address(dst, Klass::prototype_header_offset()));
1789 }
1790 
 // Store klass pointer src into the oop header at dst.
1791 void MacroAssembler::store_klass(Register dst, Register src) {
1792   str(src, Address(dst, oopDesc::klass_offset_in_bytes()));
1793 }
1794 


< prev index next >