src/hotspot/cpu/arm/interp_masm_arm.cpp
*** old/src/hotspot/cpu/arm/interp_masm_arm.cpp	Mon Sep 17 10:30:10 2018
--- new/src/hotspot/cpu/arm/interp_masm_arm.cpp	Mon Sep 17 10:30:09 2018

*** 52,90 **** --- 52,78 ----
  InterpreterMacroAssembler::InterpreterMacroAssembler(CodeBuffer* code) : MacroAssembler(code) {
  }

  void InterpreterMacroAssembler::call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {
- #if defined(ASSERT) && !defined(AARCH64)
    // Ensure that last_sp is not filled.
    { Label L;
      ldr(Rtemp, Address(FP, frame::interpreter_frame_last_sp_offset * wordSize));
      cbz(Rtemp, L);
      stop("InterpreterMacroAssembler::call_VM_helper: last_sp != NULL");
      bind(L);
    }
- #endif // ASSERT && !AARCH64

    // Rbcp must be saved/restored since it may change due to GC.
    save_bcp();

  #ifdef AARCH64
    check_no_cached_stack_top(Rtemp);
    save_stack_top();
    check_extended_sp(Rtemp);
    cut_sp_before_call();
  #endif // AARCH64

    // super call
    MacroAssembler::call_VM_helper(oop_result, entry_point, number_of_arguments, check_exceptions);

  #ifdef AARCH64
    // Restore SP to extended SP
    restore_sp_after_call(Rtemp);
    check_stack_top();
    clear_cached_stack_top();
  #endif // AARCH64

    // Restore interpreter specific registers.
    restore_bcp();
    restore_method();
  }
*** 126,153 **** --- 114,135 ----
    ldr(thread_state, Address(Rthread, JavaThread::jvmti_thread_state_offset()));

    const Address tos_addr(thread_state, JvmtiThreadState::earlyret_tos_offset());
    const Address oop_addr(thread_state, JvmtiThreadState::earlyret_oop_offset());
    const Address val_addr(thread_state, JvmtiThreadState::earlyret_value_offset());
  #ifndef AARCH64
    const Address val_addr_hi(thread_state, JvmtiThreadState::earlyret_value_offset() + in_ByteSize(wordSize));
  #endif // !AARCH64

    Register zero = zero_register(Rtemp);

    switch (state) {
      case atos: ldr(R0_tos, oop_addr);
                 str(zero, oop_addr);
                 interp_verify_oop(R0_tos, state, __FILE__, __LINE__);
                 break;

  #ifdef AARCH64
      case ltos: ldr(R0_tos, val_addr);              break;
  #else
      case ltos: ldr(R1_tos_hi, val_addr_hi);        // fall through
  #endif // AARCH64
      case btos:                                     // fall through
      case ztos:                                     // fall through
      case ctos:                                     // fall through
      case stos:                                     // fall through
      case itos: ldr_s32(R0_tos, val_addr);          break;
*** 161,173 **** --- 143,153 ----
      case vtos: /* nothing to do */                 break;

      default  : ShouldNotReachHere();
    }
    // Clean up tos value in the thread object
    str(zero, val_addr);
  #ifndef AARCH64
    str(zero, val_addr_hi);
  #endif // !AARCH64

    mov(Rtemp, (int) ilgl);
    str_32(Rtemp, tos_addr);
  }
*** 218,228 **** --- 198,207 ----
      // load bytes of index separately to avoid unaligned access
      ldrb(index, Address(Rbcp, bcp_offset+1));
      ldrb(tmp_reg, Address(Rbcp, bcp_offset));
      orr(index, tmp_reg, AsmOperand(index, lsl, BitsPerByte));
    } else if (index_size == sizeof(u4)) {
      // TODO-AARCH64: consider using unaligned access here
      ldrb(index, Address(Rbcp, bcp_offset+3));
      ldrb(tmp_reg, Address(Rbcp, bcp_offset+2));
      orr(index, tmp_reg, AsmOperand(index, lsl, BitsPerByte));
      ldrb(tmp_reg, Address(Rbcp, bcp_offset+1));
      orr(index, tmp_reg, AsmOperand(index, lsl, BitsPerByte));
*** 250,275 **** --- 229,248 ----
    // load constant pool cache pointer
    ldr(cache, Address(FP, frame::interpreter_frame_cache_offset * wordSize));

    // convert from field index to ConstantPoolCacheEntry index
    assert(sizeof(ConstantPoolCacheEntry) == 4*wordSize, "adjust code below");
    // TODO-AARCH64 merge this shift with shift "add(..., Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord))" after this method is called
    logical_shift_left(index, index, 2);
  }

  // Sets cache, index, bytecode.
  void InterpreterMacroAssembler::get_cache_and_index_and_bytecode_at_bcp(Register cache, Register index, Register bytecode, int byte_no, int bcp_offset, size_t index_size) {
    get_cache_and_index_at_bcp(cache, index, bcp_offset, index_size);
    // caution index and bytecode can be the same
    add(bytecode, cache, AsmOperand(index, lsl, LogBytesPerWord));
  #ifdef AARCH64
    add(bytecode, bytecode, (1 + byte_no) + in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()));
    ldarb(bytecode, bytecode);
  #else
    ldrb(bytecode, Address(bytecode, (1 + byte_no) + in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset())));
    TemplateTable::volatile_barrier(MacroAssembler::LoadLoad, noreg, true);
  #endif // AARCH64
  }

  // Sets cache. Blows reg_tmp.
  void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache, Register reg_tmp, int bcp_offset, size_t index_size) {
    assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
*** 363,397 **** --- 336,360 ----
    // Load objArrayOop of secondary supers.
    ldr(supers_arr, Address(Rsub_klass, Klass::secondary_supers_offset()));

    ldr_u32(supers_cnt, Address(supers_arr, Array<Klass*>::length_offset_in_bytes())); // Load the array length
  #ifdef AARCH64
    cbz(supers_cnt, not_subtype);
    add(supers_arr, supers_arr, Array<Klass*>::base_offset_in_bytes());
  #else
    cmp(supers_cnt, 0);

    // Skip to the start of array elements and prefetch the first super-klass.
    ldr(cur_super, Address(supers_arr, Array<Klass*>::base_offset_in_bytes(), pre_indexed), ne);
    b(not_subtype, eq);
  #endif // AARCH64

    bind(loop);

  #ifdef AARCH64
    ldr(cur_super, Address(supers_arr, wordSize, post_indexed));
  #endif // AARCH64

    cmp(cur_super, Rsuper_klass);
    b(update_cache, eq);

    subs(supers_cnt, supers_cnt, 1);
  #ifndef AARCH64
    ldr(cur_super, Address(supers_arr, wordSize, pre_indexed), ne);
  #endif // !AARCH64
    b(loop, ne);

    b(not_subtype);
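As a rough C++ analogue (hypothetical types, not the real HotSpot declarations), the loop generated above is a plain linear scan of the secondary-supers array, branching to update_cache on a hit and to not_subtype when the count runs out:

    #include <vector>

    struct Klass;   // hypothetical stand-in for HotSpot's Klass

    // Rough analogue of the generated scan loop over the secondary-supers array.
    static bool is_secondary_subtype(const std::vector<Klass*>& secondary_supers,
                                     Klass* super_klass) {
      for (Klass* cur_super : secondary_supers) {
        if (cur_super == super_klass) {
          return true;   // generated code also records super_klass in the secondary-super cache
        }
      }
      return false;      // corresponds to branching to the not_subtype label
    }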
*** 417,453 **** --- 380,401 ----
    assert(r != Rstack_top, "unpredictable instruction");
    ldr_s32(r, Address(Rstack_top, wordSize, post_indexed));
    zap_high_non_significant_bits(r);
  }

  #ifdef AARCH64
  void InterpreterMacroAssembler::pop_l(Register r) {
    assert(r != Rstack_top, "unpredictable instruction");
    ldr(r, Address(Rstack_top, 2*wordSize, post_indexed));
  }
  #else
  void InterpreterMacroAssembler::pop_l(Register lo, Register hi) {
    assert_different_registers(lo, hi);
    assert(lo < hi, "lo must be < hi");
    pop(RegisterSet(lo) | RegisterSet(hi));
  }
  #endif // AARCH64

  void InterpreterMacroAssembler::pop_f(FloatRegister fd) {
  #ifdef AARCH64
    ldr_s(fd, Address(Rstack_top, wordSize, post_indexed));
  #else
    fpops(fd);
  #endif // AARCH64
  }

  void InterpreterMacroAssembler::pop_d(FloatRegister fd) {
  #ifdef AARCH64
    ldr_d(fd, Address(Rstack_top, 2*wordSize, post_indexed));
  #else
    fpopd(fd);
  #endif // AARCH64
  }

  // Transition vtos -> state. Blows R0, R1. Sets TOS cached value.
  void InterpreterMacroAssembler::pop(TosState state) {
*** 456,470 **** --- 404,414 ----
      case btos:                                     // fall through
      case ztos:                                     // fall through
      case ctos:                                     // fall through
      case stos:                                     // fall through
      case itos: pop_i(R0_tos);                      break;
  #ifdef AARCH64
      case ltos: pop_l(R0_tos);                      break;
  #else
      case ltos: pop_l(R0_tos_lo, R1_tos_hi);        break;
  #endif // AARCH64
  #ifdef __SOFTFP__
      case ftos: pop_i(R0_tos);                      break;
      case dtos: pop_l(R0_tos_lo, R1_tos_hi);        break;
  #else
      case ftos: pop_f(S0_tos);                      break;
*** 486,525 **** --- 430,451 ----
    assert(r != Rstack_top, "unpredictable instruction");
    str_32(r, Address(Rstack_top, -wordSize, pre_indexed));
    check_stack_top_on_expansion();
  }

  #ifdef AARCH64
  void InterpreterMacroAssembler::push_l(Register r) {
    assert(r != Rstack_top, "unpredictable instruction");
    stp(r, ZR, Address(Rstack_top, -2*wordSize, pre_indexed));
    check_stack_top_on_expansion();
  }
  #else
  void InterpreterMacroAssembler::push_l(Register lo, Register hi) {
    assert_different_registers(lo, hi);
    assert(lo < hi, "lo must be < hi");
    push(RegisterSet(lo) | RegisterSet(hi));
  }
  #endif // AARCH64

  void InterpreterMacroAssembler::push_f() {
  #ifdef AARCH64
    str_s(S0_tos, Address(Rstack_top, -wordSize, pre_indexed));
    check_stack_top_on_expansion();
  #else
    fpushs(S0_tos);
  #endif // AARCH64
  }

  void InterpreterMacroAssembler::push_d() {
  #ifdef AARCH64
    str_d(D0_tos, Address(Rstack_top, -2*wordSize, pre_indexed));
    check_stack_top_on_expansion();
  #else
    fpushd(D0_tos);
  #endif // AARCH64
  }

  // Transition state -> vtos. Blows Rtemp.
  void InterpreterMacroAssembler::push(TosState state) {
    interp_verify_oop(R0_tos, state, __FILE__, __LINE__);
*** 528,542 **** --- 454,464 ----
      case btos:                                     // fall through
      case ztos:                                     // fall through
      case ctos:                                     // fall through
      case stos:                                     // fall through
      case itos: push_i(R0_tos);                     break;
  #ifdef AARCH64
      case ltos: push_l(R0_tos);                     break;
  #else
      case ltos: push_l(R0_tos_lo, R1_tos_hi);       break;
  #endif // AARCH64
  #ifdef __SOFTFP__
      case ftos: push_i(R0_tos);                     break;
      case dtos: push_l(R0_tos_lo, R1_tos_hi);       break;
  #else
      case ftos: push_f();                           break;
*** 546,556 **** --- 468,477 ----
      default  : ShouldNotReachHere();
    }
  }

  #ifndef AARCH64

  // Converts return value in R0/R1 (interpreter calling conventions) to TOS cached value.
  void InterpreterMacroAssembler::convert_retval_to_tos(TosState state) {
  #if (!defined __SOFTFP__ && !defined __ABI_HARD__)
    // According to interpreter calling conventions, result is returned in R0/R1,
*** 574,584 **** --- 495,504 ----
      fmrrd(R0, R1, D0_tos);
    }
  #endif // !__SOFTFP__ && !__ABI_HARD__
  }

  #endif // !AARCH64

  // Helpers for swap and dup
  void InterpreterMacroAssembler::load_ptr(int n, Register val) {
    ldr(val, Address(Rstack_top, Interpreter::expr_offset_in_bytes(n)));
*** 588,611 **** --- 508,523 ----
    str(val, Address(Rstack_top, Interpreter::expr_offset_in_bytes(n)));
  }

  void InterpreterMacroAssembler::prepare_to_jump_from_interpreted() {
  #ifdef AARCH64
    check_no_cached_stack_top(Rtemp);
    save_stack_top();
    cut_sp_before_call();
    mov(Rparams, Rstack_top);
  #endif // AARCH64

    // set sender sp
    mov(Rsender_sp, SP);

  #ifndef AARCH64
    // record last_sp
    str(Rsender_sp, Address(FP, frame::interpreter_frame_last_sp_offset * wordSize));
  #endif // !AARCH64
  }

  // Jump to from_interpreted entry of a call unless single stepping is possible
  // in this thread in which case we must call the i2i entry
  void InterpreterMacroAssembler::jump_from_interpreted(Register method) {
*** 617,639 **** --- 529,540 ----
      // JVMTI events, such as single-stepping, are implemented partly by avoiding running
      // compiled code in threads for which the event is enabled. Check here for
      // interp_only_mode if these events CAN be enabled.

      ldr_s32(Rtemp, Address(Rthread, JavaThread::interp_only_mode_offset()));
  #ifdef AARCH64
      {
        Label not_interp_only_mode;

        cbz(Rtemp, not_interp_only_mode);
        indirect_jump(Address(method, Method::interpreter_entry_offset()), Rtemp);

        bind(not_interp_only_mode);
      }
  #else
      cmp(Rtemp, 0);
      ldr(PC, Address(method, Method::interpreter_entry_offset()), ne);
  #endif // AARCH64
    }

    indirect_jump(Address(method, Method::from_interpreted_offset()), Rtemp);
  }
*** 656,671 **** --- 557,567 ----
  void InterpreterMacroAssembler::dispatch_base(TosState state, DispatchTableMode table_mode, bool verifyoop) {
    if (VerifyActivationFrameSize) {
      Label L;
  #ifdef AARCH64
      mov(Rtemp, SP);
      sub(Rtemp, FP, Rtemp);
  #else
      sub(Rtemp, FP, SP);
  #endif // AARCH64
      int min_frame_size = (frame::link_offset - frame::interpreter_frame_initial_sp_offset) * wordSize;
      cmp(Rtemp, min_frame_size);
      b(L, ge);
      stop("broken stack frame");
      bind(L);
*** 690,709 **** --- 586,599 ----
    if (table_mode == DispatchDefault) {
      if (state == vtos) {
        indirect_jump(Address::indexed_ptr(RdispatchTable, R3_bytecode), Rtemp);
      } else {
  #ifdef AARCH64
        sub(Rtemp, R3_bytecode, (Interpreter::distance_from_dispatch_table(vtos) - Interpreter::distance_from_dispatch_table(state)));
        indirect_jump(Address::indexed_ptr(RdispatchTable, Rtemp), Rtemp);
  #else
        // on 32-bit ARM this method is faster than the one above.
        sub(Rtemp, RdispatchTable, (Interpreter::distance_from_dispatch_table(vtos) - Interpreter::distance_from_dispatch_table(state)) * wordSize);
        indirect_jump(Address::indexed_ptr(Rtemp, R3_bytecode), Rtemp);
  #endif
      }
    } else {
      assert(table_mode == DispatchNormal, "invalid dispatch table mode");
      address table = (address) Interpreter::normal_table(state);
      mov_slow(Rtemp, table);
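Both variants in the hunk above compute the same dispatch slot; a minimal C++ illustration (hypothetical, contiguous table layout assumed) of why biasing the table base instead of the bytecode index is equivalent:

    #include <cassert>
    #include <cstdint>

    int main() {
      // Hypothetical: dispatch tables for all TosStates laid out contiguously.
      void* tables[10 * 256] = {};
      void** vtos_table = &tables[5 * 256];  // base kept in RdispatchTable
      intptr_t d = -3 * 256;                 // distance_from_dispatch_table(vtos) - (state), in slots
      unsigned bytecode = 0xB1;

      void** a = &vtos_table[bytecode - d];        // AArch64 variant: adjust the index
      void** b = &(vtos_table - d)[bytecode];      // 32-bit ARM variant: adjust the table base
      assert(a == b);                              // same slot either way
      return 0;
    }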
*** 895,923 **** --- 785,806 ----
                                        // points to current entry, starting with top-most entry
      sub(Rbottom, FP, -frame::interpreter_frame_monitor_block_bottom_offset * wordSize);
                                        // points to word before bottom of monitor block

      cmp(Rcur, Rbottom);               // check if there are no monitors
  #ifndef AARCH64
      ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes()), ne);
                                        // prefetch monitor's object
  #endif // !AARCH64
      b(no_unlock, eq);

      bind(loop);
  #ifdef AARCH64
      ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes()));
  #endif // AARCH64
      // check if current entry is used
      cbnz(Rcur_obj, exception_monitor_is_still_locked);

      add(Rcur, Rcur, entry_size);      // otherwise advance to next entry
      cmp(Rcur, Rbottom);               // check if bottom reached
  #ifndef AARCH64
      ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes()), ne);
                                        // prefetch monitor's object
  #endif // !AARCH64
      b(loop, ne);                      // if not at bottom then check this entry
    }

    bind(no_unlock);
*** 927,945 **** --- 810,822 ----
    } else {
      notify_method_exit(state, SkipNotifyJVMTI); // preserve TOSCA
    }

    // remove activation
  #ifdef AARCH64
    ldr(Rtemp, Address(FP, frame::interpreter_frame_sender_sp_offset * wordSize));
    ldp(FP, LR, Address(FP));
    mov(SP, Rtemp);
  #else
    mov(Rtemp, FP);
    ldmia(FP, RegisterSet(FP) | RegisterSet(LR));
    ldr(SP, Address(Rtemp, frame::interpreter_frame_sender_sp_offset * wordSize));
  #endif

    if (ret_addr != LR) {
      mov(ret_addr, LR);
    }
  }
*** 963,973 **** --- 840,850 ----
  // Lock object
  //
  // Argument: R1 : Points to BasicObjectLock to be used for locking.
  //           Must be initialized with object to lock.
! // Blows volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64), Rtemp, LR. Calls VM.
  void InterpreterMacroAssembler::lock_object(Register Rlock) {
    assert(Rlock == R1, "the second argument");

    if (UseHeavyMonitors) {
      call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), Rlock);
*** 989,1007 **** --- 866,875 ----
      if (UseBiasedLocking) {
        biased_locking_enter(Robj, Rmark/*scratched*/, R0, false, Rtemp, done, slow_case);
      }

  #ifdef AARCH64
      assert(oopDesc::mark_offset_in_bytes() == 0, "must be");
      ldr(Rmark, Robj);

      // Test if object is already locked
      assert(markOopDesc::unlocked_value == 1, "adjust this code");
      tbz(Rmark, exact_log2(markOopDesc::unlocked_value), already_locked);

  #else // AARCH64

      // On MP platforms the next load could return a 'stale' value if the memory location has been modified by another thread.
      // That would be acceptable as ether CAS or slow case path is taken in that case.
      // Exception to that is if the object is locked by the calling thread, then the recursive test will pass (guaranteed as
      // loads are satisfied from a store queue if performed on the same processor).
*** 1011,1021 **** --- 879,888 ----
      // Test if object is already locked
      tst(Rmark, markOopDesc::unlocked_value);
      b(already_locked, eq);

  #endif // !AARCH64

      // Save old object->mark() into BasicLock's displaced header
      str(Rmark, Address(Rlock, mark_offset));

      cas_for_lock_acquire(Rmark, Rlock, Robj, Rtemp, slow_case);
*** 1057,1090 **** --- 924,943 ----
      // (mark-SP) instead of the low bits of mark. In that case,
      // assuming page size is a power of 2, we can merge the two
      // conditions into a single test:
      // => ((mark - SP) & (3 - os::pagesize())) == 0

  #ifdef AARCH64
      // Use the single check since the immediate is OK for AARCH64
      sub(R0, Rmark, Rstack_top);
      intptr_t mask = ((intptr_t)3) - ((intptr_t)os::vm_page_size());
      Assembler::LogicalImmediate imm(mask, false);
      ands(R0, R0, imm);

      // For recursive case store 0 into lock record.
      // It is harmless to store it unconditionally as lock record contains some garbage
      // value in its _displaced_header field by this moment.
      str(ZR, Address(Rlock, mark_offset));
  #else // AARCH64
      // (3 - os::pagesize()) cannot be encoded as an ARM immediate operand.
      // Check independently the low bits and the distance to SP.

      // -1- test low 2 bits
      movs(R0, AsmOperand(Rmark, lsl, 30));
      // -2- test (mark - SP) if the low two bits are 0
      sub(R0, Rmark, SP, eq);
      movs(R0, AsmOperand(R0, lsr, exact_log2(os::vm_page_size())), eq);
      // If still 'eq' then recursive locking OK: store 0 into lock record
      str(R0, Address(Rlock, mark_offset), eq);
  #endif // AARCH64

  #ifndef PRODUCT
      if (PrintBiasedLockingStatistics) {
        cond_atomic_inc32(eq, BiasedLocking::fast_path_entry_count_addr());
      }
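A small C++ sketch (my own illustration, not part of the patch) of the identity the comment above relies on: with a power-of-two page size and a word-aligned SP, testing the low two bits of mark and the mark-to-SP distance separately gives the same answer as the single masked test.

    #include <cassert>
    #include <cstdint>

    int main() {
      const uintptr_t page = 4096;                           // assumed page size (power of two)
      for (uintptr_t sp = 0x1000; sp < 0x1400; sp += 8) {    // word-aligned stack pointers
        for (uintptr_t mark = 0xC00; mark < 0x1C00; mark++) {
          bool two_tests = ((mark & 3) == 0) && ((mark - sp) < page);   // -1- and -2- above
          bool merged    = ((mark - sp) & (3 - page)) == 0;             // single masked test
          assert(two_tests == merged);
        }
      }
      return 0;
    }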
*** 1104,1114 **** --- 957,967 ----
  // Unlocks an object. Used in monitorexit bytecode and remove_activation.
  //
  // Argument: R1: Points to BasicObjectLock structure for lock
  // Throw an IllegalMonitorException if object is not locked by current thread
! // Blows volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64), Rtemp, LR. Calls VM.
  void InterpreterMacroAssembler::unlock_object(Register Rlock) {
    assert(Rlock == R1, "the second argument");

    if (UseHeavyMonitors) {
      call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), Rlock);
*** 1166,1176 **** --- 1019,1029 ----
    cbz(mdp, zero_continue);
  }

  // Set the method data pointer for the current bcp.
! // Blows volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64), Rtemp, LR.
  void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() {
    assert(ProfileInterpreter, "must be profiling interpreter");
    Label set_mdp;

    // Test MDO to avoid the call if it is NULL.
*** 1263,1288 **** --- 1116,1131 ----
    ldr(bumped_count, data);
    if (decrement) {
      // Decrement the register. Set condition codes.
      subs(bumped_count, bumped_count, DataLayout::counter_increment);
      // Avoid overflow.
  #ifdef AARCH64
      assert(DataLayout::counter_increment == 1, "required for cinc");
      cinc(bumped_count, bumped_count, pl);
  #else
      add(bumped_count, bumped_count, DataLayout::counter_increment, pl);
  #endif // AARCH64
    } else {
      // Increment the register. Set condition codes.
      adds(bumped_count, bumped_count, DataLayout::counter_increment);
      // Avoid overflow.
  #ifdef AARCH64
      assert(DataLayout::counter_increment == 1, "required for cinv");
      cinv(bumped_count, bumped_count, mi); // inverts 0x80..00 back to 0x7f..ff
  #else
      sub(bumped_count, bumped_count, DataLayout::counter_increment, mi);
  #endif // AARCH64
    }
    str(bumped_count, data);
  }
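For the increment case in the hunk above, the overflow handling saturates the profile counter at the largest positive value. A short C++ sketch (my own analogue, not from the patch) of the 32-bit ARM adds/sub-on-mi sequence:

    #include <cassert>
    #include <cstdint>
    #include <limits>

    // Mirrors: adds(bumped_count, bumped_count, increment);
    //          sub(bumped_count, bumped_count, increment, mi);   // undo if the add went negative
    static int32_t bump_saturating(int32_t count, int32_t increment = 1) {
      uint32_t bumped = (uint32_t)count + (uint32_t)increment;    // wrapping add ('adds')
      if ((int32_t)bumped < 0) {                                  // 'mi': sign bit set after the add
        bumped -= (uint32_t)increment;                            // counter sticks at 0x7fffffff
      }
      return (int32_t)bumped;                                     // counters are non-negative in practice
    }

    int main() {
      assert(bump_saturating(41) == 42);
      assert(bump_saturating(std::numeric_limits<int32_t>::max()) ==
             std::numeric_limits<int32_t>::max());
      return 0;
    }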
*** 1326,1336 **** --- 1169,1179 ----
    add(mdp_in, mdp_in, constant);
    str(mdp_in, Address(FP, frame::interpreter_frame_mdp_offset * wordSize));
  }

! // Blows volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64, Rtemp, LR).
  void InterpreterMacroAssembler::update_mdp_for_ret(Register return_bci) {
    assert(ProfileInterpreter, "must be profiling interpreter");
    assert_different_registers(return_bci, R0, R1, R2, R3, Rtemp);

    mov(R1, return_bci);
*** 1540,1550 **** --- 1383,1393 ----
    record_klass_in_profile_helper(receiver, mdp, reg_tmp, 0, done, is_virtual_call);

    bind (done);
  }

! // Sets mdp, blows volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64, Rtemp, LR).
  void InterpreterMacroAssembler::profile_ret(Register mdp, Register return_bci) {
    assert_different_registers(mdp, return_bci, Rtemp, R0, R1, R2, R3);

    if (ProfileInterpreter) {
      Label profile_continue;
*** 1702,1731 **** --- 1545,1570 ----
    }
  }

  void InterpreterMacroAssembler::byteswap_u32(Register r, Register rtmp1, Register rtmp2) {
  #ifdef AARCH64
    rev_w(r, r);
  #else
    if (VM_Version::supports_rev()) {
      rev(r, r);
    } else {
      eor(rtmp1, r, AsmOperand(r, ror, 16));
      mvn(rtmp2, 0x0000ff00);
      andr(rtmp1, rtmp2, AsmOperand(rtmp1, lsr, 8));
      eor(r, rtmp1, AsmOperand(r, ror, 8));
    }
  #endif // AARCH64
  }

  void InterpreterMacroAssembler::inc_global_counter(address address_of_counter, int offset, Register tmp1, Register tmp2, bool avoid_overflow) {
    const intx addr = (intx) (address_of_counter + offset);

    assert ((addr & 0x3) == 0, "address of counter should be aligned");
!   const intx offset_mask = right_n_bits(AARCH64_ONLY(12 + 2) NOT_AARCH64(12));

    const address base = (address) (addr & ~offset_mask);
    const int offs = (int) (addr & offset_mask);

    const Register addr_base = tmp1;
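For reference, a standalone C++ check (my own sketch, not part of the patch) that the four-instruction fallback used when rev is unavailable really does byte-reverse a 32-bit word:

    #include <cassert>
    #include <cstdint>

    static uint32_t ror(uint32_t x, unsigned n) { return (x >> n) | (x << (32 - n)); }

    // Mirrors: eor rtmp1, r, r ror 16 ; mvn rtmp2, 0x0000ff00 ;
    //          and rtmp1, rtmp2, rtmp1 lsr 8 ; eor r, rtmp1, r ror 8
    static uint32_t byteswap_fallback(uint32_t r) {
      uint32_t tmp1 = r ^ ror(r, 16);
      uint32_t tmp2 = ~UINT32_C(0x0000ff00);
      tmp1 = tmp2 & (tmp1 >> 8);
      return tmp1 ^ ror(r, 8);
    }

    int main() {
      assert(byteswap_fallback(0x12345678u) == 0x78563412u);
      assert(byteswap_fallback(0xdeadbeefu) == 0xefbeaddeu);
      return 0;
    }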
*** 1734,1751 **** --- 1573,1583 ----
    mov_slow(addr_base, base);
    ldr_s32(val, Address(addr_base, offs));

    if (avoid_overflow) {
      adds_32(val, val, 1);
  #ifdef AARCH64
      Label L;
      b(L, mi);
      str_32(val, Address(addr_base, offs));
      bind(L);
  #else
      str(val, Address(addr_base, offs), pl);
  #endif // AARCH64
    } else {
      add_32(val, val, 1);
      str_32(val, Address(addr_base, offs));
    }
  }
*** 1821,1864 **** --- 1653,1682 ----
      cbz(Rtemp, L);

      if (native) {
        // For c++ and template interpreter push both result registers on the
        // stack in native, we don't know the state.
        // On AArch64 result registers are stored into the frame at known locations.
        // See frame::interpreter_frame_result for code that gets the result values from here.
        assert(result_lo != noreg, "result registers should be defined");

  #ifdef AARCH64
        assert(result_hi == noreg, "result_hi is not used on AArch64");
        assert(result_fp != fnoreg, "FP result register must be defined");

        str_d(result_fp, Address(FP, frame::interpreter_frame_fp_saved_result_offset * wordSize));
        str(result_lo, Address(FP, frame::interpreter_frame_gp_saved_result_offset * wordSize));
  #else
        assert(result_hi != noreg, "result registers should be defined");

  #ifdef __ABI_HARD__
        assert(result_fp != fnoreg, "FP result register must be defined");
        sub(SP, SP, 2 * wordSize);
        fstd(result_fp, Address(SP));
  #endif // __ABI_HARD__

        push(RegisterSet(result_lo) | RegisterSet(result_hi));
  #endif // AARCH64

        call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_exit));

  #ifdef AARCH64
        ldr_d(result_fp, Address(FP, frame::interpreter_frame_fp_saved_result_offset * wordSize));
        ldr(result_lo, Address(FP, frame::interpreter_frame_gp_saved_result_offset * wordSize));
  #else
        pop(RegisterSet(result_lo) | RegisterSet(result_hi));
  #ifdef __ABI_HARD__
        fldd(result_fp, Address(SP));
        add(SP, SP, 2 * wordSize);
  #endif // __ABI_HARD__
  #endif // AARCH64

      } else {
        // For the template interpreter, the value on tos is the size of the
        // state. (c++ interpreter calls jvmti somewhere else).
        push(state);
*** 1930,1946 **** --- 1748,1759 ----
    assert_different_registers(scratch, scratch2);
    ldr_u32(scratch, counter_addr);
    add(scratch, scratch, increment);
    str_32(scratch, counter_addr);

  #ifdef AARCH64
    ldr_u32(scratch2, mask_addr);
    ands_w(ZR, scratch, scratch2);
  #else
    ldr(scratch2, mask_addr);
    andrs(scratch, scratch, scratch2);
  #endif // AARCH64
    b(*where, cond);
  }

  void InterpreterMacroAssembler::get_method_counters(Register method,
                                                      Register Rcounters,
*** 1957,1986 **** --- 1770,1788 ----
    if (saveRegs) {
      // Save and restore in use caller-saved registers since they will be trashed by call_VM
      assert(reg1 != noreg, "must specify reg1");
      assert(reg2 != noreg, "must specify reg2");
  #ifdef AARCH64
      assert(reg3 != noreg, "must specify reg3");
      stp(reg1, reg2, Address(Rstack_top, -2*wordSize, pre_indexed));
      stp(reg3, ZR, Address(Rstack_top, -2*wordSize, pre_indexed));
  #else
      assert(reg3 == noreg, "must not specify reg3");
      push(RegisterSet(reg1) | RegisterSet(reg2));
  #endif
    }

    mov(R1, method);
    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::build_method_counters), R1);

    if (saveRegs) {
  #ifdef AARCH64
      ldp(reg3, ZR, Address(Rstack_top, 2*wordSize, post_indexed));
      ldp(reg1, reg2, Address(Rstack_top, 2*wordSize, post_indexed));
  #else
      pop(RegisterSet(reg1) | RegisterSet(reg2));
  #endif
    }

    ldr(Rcounters, method_counters);
    cbz(Rcounters, skip); // No MethodCounters created, OutOfMemory
