hotspot/src/cpu/x86/vm/interp_masm_x86_64.cpp

rev 611 : Merge

*** 1,10 **** - #ifdef USE_PRAGMA_IDENT_SRC - #pragma ident "@(#)interp_masm_x86_64.cpp 1.48 07/09/17 09:26:04 JVM" - #endif /* ! * Copyright 2003-2007 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. --- 1,7 ---- /* ! * Copyright 2003-2008 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation.
*** 29,77 **** #include "incls/_interp_masm_x86_64.cpp.incl" // Implementation of InterpreterMacroAssembler void InterpreterMacroAssembler::call_VM_leaf_base(address entry_point, int number_of_arguments) { // interpreter specific // // Note: No need to save/restore bcp & locals (r13 & r14) pointer // since these are callee saved registers and no blocking/ // GC can happen in leaf calls. #ifdef ASSERT - save_bcp(); { Label L; ! cmpq(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int)NULL_WORD); jcc(Assembler::equal, L); stop("InterpreterMacroAssembler::call_VM_leaf_base:" " last_sp != NULL"); bind(L); } #endif // super call MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments); // interpreter specific ! #ifdef ASSERT ! { ! Label L; ! cmpq(r13, Address(rbp, frame::interpreter_frame_bcx_offset * wordSize)); ! jcc(Assembler::equal, L); ! stop("InterpreterMacroAssembler::call_VM_leaf_base:" ! " r13 not callee saved?"); ! bind(L); ! } ! { ! Label L; ! cmpq(r14, Address(rbp, frame::interpreter_frame_locals_offset * wordSize)); ! jcc(Assembler::equal, L); ! stop("InterpreterMacroAssembler::call_VM_leaf_base:" ! " r14 not callee saved?"); ! bind(L); ! } ! #endif } void InterpreterMacroAssembler::call_VM_base(Register oop_result, Register java_thread, Register last_java_sp, --- 26,73 ---- #include "incls/_interp_masm_x86_64.cpp.incl" // Implementation of InterpreterMacroAssembler + #ifdef CC_INTERP + void InterpreterMacroAssembler::get_method(Register reg) { + movptr(reg, Address(rbp, -(sizeof(BytecodeInterpreter) + 2 * wordSize))); + movptr(reg, Address(reg, byte_offset_of(BytecodeInterpreter, _method))); + } + #endif // CC_INTERP + + #ifndef CC_INTERP + void InterpreterMacroAssembler::call_VM_leaf_base(address entry_point, int number_of_arguments) { // interpreter specific // // Note: No need to save/restore bcp & locals (r13 & r14) pointer // since these are callee saved registers and no blocking/ // GC can happen in leaf calls. + // Further Note: DO NOT save/restore bcp/locals. If a caller has + // already saved them so that it can use esi/edi as temporaries + // then a save/restore here will DESTROY the copy the caller + // saved! There used to be a save_bcp() that only happened in + // the ASSERT path (no restore_bcp). Which caused bizarre failures + // when jvm built with ASSERTs. #ifdef ASSERT { Label L; ! cmpptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD); jcc(Assembler::equal, L); stop("InterpreterMacroAssembler::call_VM_leaf_base:" " last_sp != NULL"); bind(L); } #endif // super call MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments); // interpreter specific ! // Used to ASSERT that r13/r14 were equal to frame's bcp/locals ! // but since they may not have been saved (and we don't want to ! // save thme here (see note above) the assert is invalid. } void InterpreterMacroAssembler::call_VM_base(Register oop_result, Register java_thread, Register last_java_sp,
*** 87,97 **** // assert(java_thread == noreg , "not expecting a precomputed java thread"); save_bcp(); #ifdef ASSERT { Label L; ! cmpq(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int)NULL_WORD); jcc(Assembler::equal, L); stop("InterpreterMacroAssembler::call_VM_leaf_base:" " last_sp != NULL"); bind(L); } --- 83,93 ---- // assert(java_thread == noreg , "not expecting a precomputed java thread"); save_bcp(); #ifdef ASSERT { Label L; ! cmpptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD); jcc(Assembler::equal, L); stop("InterpreterMacroAssembler::call_VM_leaf_base:" " last_sp != NULL"); bind(L); }
*** 128,146 **** } } void InterpreterMacroAssembler::load_earlyret_value(TosState state) { ! movq(rcx, Address(r15_thread, JavaThread::jvmti_thread_state_offset())); const Address tos_addr(rcx, JvmtiThreadState::earlyret_tos_offset()); const Address oop_addr(rcx, JvmtiThreadState::earlyret_oop_offset()); const Address val_addr(rcx, JvmtiThreadState::earlyret_value_offset()); switch (state) { ! case atos: movq(rax, oop_addr); ! movptr(oop_addr, NULL_WORD); verify_oop(rax, state); break; ! case ltos: movq(rax, val_addr); break; case btos: // fall through case ctos: // fall through case stos: // fall through case itos: movl(rax, val_addr); break; case ftos: movflt(xmm0, val_addr); break; --- 124,142 ---- } } void InterpreterMacroAssembler::load_earlyret_value(TosState state) { ! movptr(rcx, Address(r15_thread, JavaThread::jvmti_thread_state_offset())); const Address tos_addr(rcx, JvmtiThreadState::earlyret_tos_offset()); const Address oop_addr(rcx, JvmtiThreadState::earlyret_oop_offset()); const Address val_addr(rcx, JvmtiThreadState::earlyret_value_offset()); switch (state) { ! case atos: movptr(rax, oop_addr); ! movptr(oop_addr, (int32_t)NULL_WORD); verify_oop(rax, state); break; ! case ltos: movptr(rax, val_addr); break; case btos: // fall through case ctos: // fall through case stos: // fall through case itos: movl(rax, val_addr); break; case ftos: movflt(xmm0, val_addr); break;
*** 148,166 **** case vtos: /* nothing to do */ break; default : ShouldNotReachHere(); } // Clean up tos value in the thread object movl(tos_addr, (int) ilgl); ! movl(val_addr, (int) NULL_WORD); } void InterpreterMacroAssembler::check_and_handle_earlyret(Register java_thread) { if (JvmtiExport::can_force_early_return()) { Label L; ! movq(c_rarg0, Address(r15_thread, JavaThread::jvmti_thread_state_offset())); ! testq(c_rarg0, c_rarg0); jcc(Assembler::zero, L); // if (thread->jvmti_thread_state() == NULL) exit; // Initiate earlyret handling only if it is not already being processed. // If the flag has the earlyret_processing bit set, it means that this code // is called *during* earlyret handling - we don't want to reenter. --- 144,162 ---- case vtos: /* nothing to do */ break; default : ShouldNotReachHere(); } // Clean up tos value in the thread object movl(tos_addr, (int) ilgl); ! movl(val_addr, (int32_t) NULL_WORD); } void InterpreterMacroAssembler::check_and_handle_earlyret(Register java_thread) { if (JvmtiExport::can_force_early_return()) { Label L; ! movptr(c_rarg0, Address(r15_thread, JavaThread::jvmti_thread_state_offset())); ! testptr(c_rarg0, c_rarg0); jcc(Assembler::zero, L); // if (thread->jvmti_thread_state() == NULL) exit; // Initiate earlyret handling only if it is not already being processed. // If the flag has the earlyret_processing bit set, it means that this code // is called *during* earlyret handling - we don't want to reenter.
*** 168,178 **** cmpl(c_rarg0, JvmtiThreadState::earlyret_pending); jcc(Assembler::notEqual, L); // Call Interpreter::remove_activation_early_entry() to get the address of the // same-named entrypoint in the generated interpreter code. ! movq(c_rarg0, Address(r15_thread, JavaThread::jvmti_thread_state_offset())); movl(c_rarg0, Address(c_rarg0, JvmtiThreadState::earlyret_tos_offset())); call_VM_leaf(CAST_FROM_FN_PTR(address, Interpreter::remove_activation_early_entry), c_rarg0); jmp(rax); bind(L); } --- 164,174 ---- cmpl(c_rarg0, JvmtiThreadState::earlyret_pending); jcc(Assembler::notEqual, L); // Call Interpreter::remove_activation_early_entry() to get the address of the // same-named entrypoint in the generated interpreter code. ! movptr(c_rarg0, Address(r15_thread, JavaThread::jvmti_thread_state_offset())); movl(c_rarg0, Address(c_rarg0, JvmtiThreadState::earlyret_tos_offset())); call_VM_leaf(CAST_FROM_FN_PTR(address, Interpreter::remove_activation_early_entry), c_rarg0); jmp(rax); bind(L); }
*** 193,203 **** Register index, int bcp_offset) { assert(bcp_offset > 0, "bcp is still pointing to start of bytecode"); assert(cache != index, "must use different registers"); load_unsigned_word(index, Address(r13, bcp_offset)); ! movq(cache, Address(rbp, frame::interpreter_frame_cache_offset * wordSize)); assert(sizeof(ConstantPoolCacheEntry) == 4 * wordSize, "adjust code below"); // convert from field index to ConstantPoolCacheEntry index shll(index, 2); } --- 189,199 ---- Register index, int bcp_offset) { assert(bcp_offset > 0, "bcp is still pointing to start of bytecode"); assert(cache != index, "must use different registers"); load_unsigned_word(index, Address(r13, bcp_offset)); ! movptr(cache, Address(rbp, frame::interpreter_frame_cache_offset * wordSize)); assert(sizeof(ConstantPoolCacheEntry) == 4 * wordSize, "adjust code below"); // convert from field index to ConstantPoolCacheEntry index shll(index, 2); }
*** 210,223 **** load_unsigned_word(tmp, Address(r13, bcp_offset)); assert(sizeof(ConstantPoolCacheEntry) == 4 * wordSize, "adjust code below"); // convert from field index to ConstantPoolCacheEntry index // and from word offset to byte offset shll(tmp, 2 + LogBytesPerWord); ! movq(cache, Address(rbp, frame::interpreter_frame_cache_offset * wordSize)); // skip past the header ! addq(cache, in_bytes(constantPoolCacheOopDesc::base_offset())); ! addq(cache, tmp); // construct pointer to cache entry } // Generate a subtype check: branch to ok_is_subtype if sub_klass is a // subtype of super_klass. --- 206,219 ---- load_unsigned_word(tmp, Address(r13, bcp_offset)); assert(sizeof(ConstantPoolCacheEntry) == 4 * wordSize, "adjust code below"); // convert from field index to ConstantPoolCacheEntry index // and from word offset to byte offset shll(tmp, 2 + LogBytesPerWord); ! movptr(cache, Address(rbp, frame::interpreter_frame_cache_offset * wordSize)); // skip past the header ! addptr(cache, in_bytes(constantPoolCacheOopDesc::base_offset())); ! addptr(cache, tmp); // construct pointer to cache entry } // Generate a subtype check: branch to ok_is_subtype if sub_klass is a // subtype of super_klass.
*** 234,244 **** assert(Rsub_klass != r14, "r14 holds locals"); assert(Rsub_klass != r13, "r13 holds bcp"); assert(Rsub_klass != rcx, "rcx holds 2ndary super array length"); assert(Rsub_klass != rdi, "rdi holds 2ndary super array scan ptr"); ! Label not_subtype, loop; // Profile the not-null value's klass. profile_typecheck(rcx, Rsub_klass, rdi); // blows rcx, rdi // Load the super-klass's check offset into rcx --- 230,240 ---- assert(Rsub_klass != r14, "r14 holds locals"); assert(Rsub_klass != r13, "r13 holds bcp"); assert(Rsub_klass != rcx, "rcx holds 2ndary super array length"); assert(Rsub_klass != rdi, "rdi holds 2ndary super array scan ptr"); ! Label not_subtype, not_subtype_pop, loop; // Profile the not-null value's klass. profile_typecheck(rcx, Rsub_klass, rdi); // blows rcx, rdi // Load the super-klass's check offset into rcx
*** 248,290 **** // cache of the secondary superclass list, or a failing value with a // sentinel offset if the super-klass is an interface or // exceptionally deep in the Java hierarchy and we have to scan the // secondary superclass list the hard way. See if we get an // immediate positive hit ! cmpq(rax, Address(Rsub_klass, rcx, Address::times_1)); jcc(Assembler::equal,ok_is_subtype); // Check for immediate negative hit cmpl(rcx, sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes()); jcc( Assembler::notEqual, not_subtype ); // Check for self ! cmpq(Rsub_klass, rax); jcc(Assembler::equal, ok_is_subtype); // Now do a linear scan of the secondary super-klass chain. ! movq(rdi, Address(Rsub_klass, sizeof(oopDesc) + Klass::secondary_supers_offset_in_bytes())); // rdi holds the objArrayOop of secondary supers. // Load the array length movl(rcx, Address(rdi, arrayOopDesc::length_offset_in_bytes())); // Skip to start of data; also clear Z flag incase rcx is zero ! addq(rdi, arrayOopDesc::base_offset_in_bytes(T_OBJECT)); // Scan rcx words at [rdi] for occurance of rax // Set NZ/Z based on last compare ! repne_scan(); // Not equal? jcc(Assembler::notEqual, not_subtype); // Must be equal but missed in cache. Update cache. ! movq(Address(Rsub_klass, sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes()), rax); jmp(ok_is_subtype); bind(not_subtype); profile_typecheck_failed(rcx); // blows rcx } // Java Expression Stack #ifdef ASSERT // Verifies that the stack tag matches. Must be called before the stack // value is popped off the stack. --- 244,303 ---- // cache of the secondary superclass list, or a failing value with a // sentinel offset if the super-klass is an interface or // exceptionally deep in the Java hierarchy and we have to scan the // secondary superclass list the hard way. See if we get an // immediate positive hit ! cmpptr(rax, Address(Rsub_klass, rcx, Address::times_1)); jcc(Assembler::equal,ok_is_subtype); // Check for immediate negative hit cmpl(rcx, sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes()); jcc( Assembler::notEqual, not_subtype ); // Check for self ! cmpptr(Rsub_klass, rax); jcc(Assembler::equal, ok_is_subtype); // Now do a linear scan of the secondary super-klass chain. ! movptr(rdi, Address(Rsub_klass, sizeof(oopDesc) + Klass::secondary_supers_offset_in_bytes())); // rdi holds the objArrayOop of secondary supers. // Load the array length movl(rcx, Address(rdi, arrayOopDesc::length_offset_in_bytes())); // Skip to start of data; also clear Z flag incase rcx is zero ! addptr(rdi, arrayOopDesc::base_offset_in_bytes(T_OBJECT)); // Scan rcx words at [rdi] for occurance of rax // Set NZ/Z based on last compare ! ! // this part is kind tricky, as values in supers array could be 32 or 64 bit wide ! // and we store values in objArrays always encoded, thus we need to encode value ! // before repne ! if (UseCompressedOops) { ! push(rax); ! encode_heap_oop(rax); ! repne_scanl(); // Not equal? + jcc(Assembler::notEqual, not_subtype_pop); + // restore heap oop here for movq + pop(rax); + } else { + repne_scan(); jcc(Assembler::notEqual, not_subtype); + } // Must be equal but missed in cache. Update cache. ! 
movptr(Address(Rsub_klass, sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes()), rax); jmp(ok_is_subtype); + bind(not_subtype_pop); + // restore heap oop here for miss + if (UseCompressedOops) pop(rax); bind(not_subtype); profile_typecheck_failed(rcx); // blows rcx } + // Java Expression Stack #ifdef ASSERT // Verifies that the stack tag matches. Must be called before the stack // value is popped off the stack.
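Editor's note: the new UseCompressedOops branch in the hunk above has to narrow the klass oop in rax before repne_scanl can compare it against the 32-bit encoded entries of the secondary-supers array. As a rough, standalone illustration of that encode step (not the actual encode_heap_oop implementation; the narrow-oop base and the 3-bit shift below are assumptions mirroring the common 8-byte-aligned layout):

    #include <cstdint>

    // Illustrative only: kHeapBase and kOopShift are assumed values.
    static const uintptr_t kHeapBase = 0;   // assumed narrow-oop base
    static const int       kOopShift = 3;   // log2(8-byte object alignment)

    inline uint32_t encode_oop(uintptr_t oop) {
      // null stays null; otherwise (oop - base) >> shift fits in 32 bits
      return oop == 0 ? 0 : (uint32_t)((oop - kHeapBase) >> kOopShift);
    }

    inline uintptr_t decode_oop(uint32_t narrow) {
      return narrow == 0 ? 0 : kHeapBase + ((uintptr_t)narrow << kOopShift);
    }

With rax narrowed this way the 32-bit scan can hit entries that were stored encoded, which is why the hit path pops the original rax back before the full-width cache update ("restore heap oop here for movq").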
*** 292,398 **** if (TaggedStackInterpreter) { frame::Tag tag = t; if (t == frame::TagCategory2) { tag = frame::TagValue; Label hokay; ! cmpq(Address(rsp, 3*wordSize), (int)tag); jcc(Assembler::equal, hokay); stop("Java Expression stack tag high value is bad"); bind(hokay); } Label okay; ! cmpq(Address(rsp, wordSize), (int)tag); jcc(Assembler::equal, okay); // Also compare if the stack value is zero, then the tag might // not have been set coming from deopt. ! cmpq(Address(rsp, 0), 0); jcc(Assembler::equal, okay); stop("Java Expression stack tag value is bad"); bind(okay); } } #endif // ASSERT void InterpreterMacroAssembler::pop_ptr(Register r) { debug_only(verify_stack_tag(frame::TagReference)); ! popq(r); ! if (TaggedStackInterpreter) addq(rsp, 1 * wordSize); } void InterpreterMacroAssembler::pop_ptr(Register r, Register tag) { ! popq(r); ! if (TaggedStackInterpreter) popq(tag); } void InterpreterMacroAssembler::pop_i(Register r) { ! // XXX can't use popq currently, upper half non clean debug_only(verify_stack_tag(frame::TagValue)); movl(r, Address(rsp, 0)); ! addq(rsp, wordSize); ! if (TaggedStackInterpreter) addq(rsp, 1 * wordSize); } void InterpreterMacroAssembler::pop_l(Register r) { debug_only(verify_stack_tag(frame::TagCategory2)); movq(r, Address(rsp, 0)); ! addq(rsp, 2 * Interpreter::stackElementSize()); } void InterpreterMacroAssembler::pop_f(XMMRegister r) { debug_only(verify_stack_tag(frame::TagValue)); movflt(r, Address(rsp, 0)); ! addq(rsp, wordSize); ! if (TaggedStackInterpreter) addq(rsp, 1 * wordSize); } void InterpreterMacroAssembler::pop_d(XMMRegister r) { debug_only(verify_stack_tag(frame::TagCategory2)); movdbl(r, Address(rsp, 0)); ! addq(rsp, 2 * Interpreter::stackElementSize()); } void InterpreterMacroAssembler::push_ptr(Register r) { ! if (TaggedStackInterpreter) pushq(frame::TagReference); ! pushq(r); } void InterpreterMacroAssembler::push_ptr(Register r, Register tag) { ! if (TaggedStackInterpreter) pushq(tag); ! pushq(r); } void InterpreterMacroAssembler::push_i(Register r) { ! if (TaggedStackInterpreter) pushq(frame::TagValue); ! pushq(r); } void InterpreterMacroAssembler::push_l(Register r) { if (TaggedStackInterpreter) { ! pushq(frame::TagValue); ! subq(rsp, 1 * wordSize); ! pushq(frame::TagValue); ! subq(rsp, 1 * wordSize); } else { ! subq(rsp, 2 * wordSize); } movq(Address(rsp, 0), r); } void InterpreterMacroAssembler::push_f(XMMRegister r) { ! if (TaggedStackInterpreter) pushq(frame::TagValue); ! subq(rsp, wordSize); movflt(Address(rsp, 0), r); } void InterpreterMacroAssembler::push_d(XMMRegister r) { if (TaggedStackInterpreter) { ! pushq(frame::TagValue); ! subq(rsp, 1 * wordSize); ! pushq(frame::TagValue); ! subq(rsp, 1 * wordSize); } else { ! subq(rsp, 2 * wordSize); } movdbl(Address(rsp, 0), r); } void InterpreterMacroAssembler::pop(TosState state) { --- 305,411 ---- if (TaggedStackInterpreter) { frame::Tag tag = t; if (t == frame::TagCategory2) { tag = frame::TagValue; Label hokay; ! cmpptr(Address(rsp, 3*wordSize), (int32_t)tag); jcc(Assembler::equal, hokay); stop("Java Expression stack tag high value is bad"); bind(hokay); } Label okay; ! cmpptr(Address(rsp, wordSize), (int32_t)tag); jcc(Assembler::equal, okay); // Also compare if the stack value is zero, then the tag might // not have been set coming from deopt. ! 
cmpptr(Address(rsp, 0), 0); jcc(Assembler::equal, okay); stop("Java Expression stack tag value is bad"); bind(okay); } } #endif // ASSERT void InterpreterMacroAssembler::pop_ptr(Register r) { debug_only(verify_stack_tag(frame::TagReference)); ! pop(r); ! if (TaggedStackInterpreter) addptr(rsp, 1 * wordSize); } void InterpreterMacroAssembler::pop_ptr(Register r, Register tag) { ! pop(r); ! if (TaggedStackInterpreter) pop(tag); } void InterpreterMacroAssembler::pop_i(Register r) { ! // XXX can't use pop currently, upper half non clean debug_only(verify_stack_tag(frame::TagValue)); movl(r, Address(rsp, 0)); ! addptr(rsp, wordSize); ! if (TaggedStackInterpreter) addptr(rsp, 1 * wordSize); } void InterpreterMacroAssembler::pop_l(Register r) { debug_only(verify_stack_tag(frame::TagCategory2)); movq(r, Address(rsp, 0)); ! addptr(rsp, 2 * Interpreter::stackElementSize()); } void InterpreterMacroAssembler::pop_f(XMMRegister r) { debug_only(verify_stack_tag(frame::TagValue)); movflt(r, Address(rsp, 0)); ! addptr(rsp, wordSize); ! if (TaggedStackInterpreter) addptr(rsp, 1 * wordSize); } void InterpreterMacroAssembler::pop_d(XMMRegister r) { debug_only(verify_stack_tag(frame::TagCategory2)); movdbl(r, Address(rsp, 0)); ! addptr(rsp, 2 * Interpreter::stackElementSize()); } void InterpreterMacroAssembler::push_ptr(Register r) { ! if (TaggedStackInterpreter) push(frame::TagReference); ! push(r); } void InterpreterMacroAssembler::push_ptr(Register r, Register tag) { ! if (TaggedStackInterpreter) push(tag); ! push(r); } void InterpreterMacroAssembler::push_i(Register r) { ! if (TaggedStackInterpreter) push(frame::TagValue); ! push(r); } void InterpreterMacroAssembler::push_l(Register r) { if (TaggedStackInterpreter) { ! push(frame::TagValue); ! subptr(rsp, 1 * wordSize); ! push(frame::TagValue); ! subptr(rsp, 1 * wordSize); } else { ! subptr(rsp, 2 * wordSize); } movq(Address(rsp, 0), r); } void InterpreterMacroAssembler::push_f(XMMRegister r) { ! if (TaggedStackInterpreter) push(frame::TagValue); ! subptr(rsp, wordSize); movflt(Address(rsp, 0), r); } void InterpreterMacroAssembler::push_d(XMMRegister r) { if (TaggedStackInterpreter) { ! push(frame::TagValue); ! subptr(rsp, 1 * wordSize); ! push(frame::TagValue); ! subptr(rsp, 1 * wordSize); } else { ! subptr(rsp, 2 * wordSize); } movdbl(Address(rsp, 0), r); } void InterpreterMacroAssembler::pop(TosState state) {
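Editor's note: in the TaggedStackInterpreter pushes and pops above, every value slot on the expression stack is paired with a tag slot, and a category-2 value (long/double) occupies two tagged pairs with the value kept in the topmost one. A loose, illustrative model of that layout (slot order and tag constants are assumptions, not the real frame layout):

    #include <cstdint>
    #include <vector>

    enum Tag : intptr_t { TagValue = 0, TagReference = 1 };   // assumed encodings

    struct TaggedExprStack {
      std::vector<intptr_t> slots;   // last element models the top of stack

      void push_ptr(intptr_t oop) { slots.push_back(TagReference); slots.push_back(oop); }
      void push_i(intptr_t v)     { slots.push_back(TagValue);     slots.push_back(v);   }
      void push_l(int64_t v) {     // two tagged pairs; the value lives in the top pair
        slots.push_back(TagValue); slots.push_back(0);            // filler pair
        slots.push_back(TagValue); slots.push_back((intptr_t)v);
      }
    };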
*** 426,514 **** default : ShouldNotReachHere(); } } // Tagged stack helpers for swap and dup void InterpreterMacroAssembler::load_ptr_and_tag(int n, Register val, Register tag) { ! movq(val, Address(rsp, Interpreter::expr_offset_in_bytes(n))); if (TaggedStackInterpreter) { ! movq(tag, Address(rsp, Interpreter::expr_tag_offset_in_bytes(n))); } } void InterpreterMacroAssembler::store_ptr_and_tag(int n, Register val, Register tag) { ! movq(Address(rsp, Interpreter::expr_offset_in_bytes(n)), val); if (TaggedStackInterpreter) { ! movq(Address(rsp, Interpreter::expr_tag_offset_in_bytes(n)), tag); } } // Tagged local support void InterpreterMacroAssembler::tag_local(frame::Tag tag, int n) { if (TaggedStackInterpreter) { if (tag == frame::TagCategory2) { ! mov64(Address(r14, Interpreter::local_tag_offset_in_bytes(n+1)), ! (intptr_t)frame::TagValue); ! mov64(Address(r14, Interpreter::local_tag_offset_in_bytes(n)), ! (intptr_t)frame::TagValue); } else { ! mov64(Address(r14, Interpreter::local_tag_offset_in_bytes(n)), (intptr_t)tag); } } } void InterpreterMacroAssembler::tag_local(frame::Tag tag, Register idx) { if (TaggedStackInterpreter) { if (tag == frame::TagCategory2) { ! mov64(Address(r14, idx, Address::times_8, ! Interpreter::local_tag_offset_in_bytes(1)), (intptr_t)frame::TagValue); ! mov64(Address(r14, idx, Address::times_8, ! Interpreter::local_tag_offset_in_bytes(0)), (intptr_t)frame::TagValue); } else { ! mov64(Address(r14, idx, Address::times_8, Interpreter::local_tag_offset_in_bytes(0)), ! (intptr_t)tag); } } } void InterpreterMacroAssembler::tag_local(Register tag, Register idx) { if (TaggedStackInterpreter) { // can only be TagValue or TagReference ! movq(Address(r14, idx, Address::times_8, Interpreter::local_tag_offset_in_bytes(0)), tag); } } void InterpreterMacroAssembler::tag_local(Register tag, int n) { if (TaggedStackInterpreter) { // can only be TagValue or TagReference ! movq(Address(r14, Interpreter::local_tag_offset_in_bytes(n)), tag); } } #ifdef ASSERT void InterpreterMacroAssembler::verify_local_tag(frame::Tag tag, int n) { if (TaggedStackInterpreter) { frame::Tag t = tag; if (tag == frame::TagCategory2) { Label nbl; t = frame::TagValue; // change to what is stored in locals ! cmpq(Address(r14, Interpreter::local_tag_offset_in_bytes(n+1)), (int)t); jcc(Assembler::equal, nbl); stop("Local tag is bad for long/double"); bind(nbl); } Label notBad; ! cmpq(Address(r14, Interpreter::local_tag_offset_in_bytes(n)), (int)t); jcc(Assembler::equal, notBad); // Also compare if the local value is zero, then the tag might // not have been set coming from deopt. ! cmpq(Address(r14, Interpreter::local_offset_in_bytes(n)), 0); jcc(Assembler::equal, notBad); stop("Local tag is bad"); bind(notBad); } } --- 439,529 ---- default : ShouldNotReachHere(); } } + + // Tagged stack helpers for swap and dup void InterpreterMacroAssembler::load_ptr_and_tag(int n, Register val, Register tag) { ! movptr(val, Address(rsp, Interpreter::expr_offset_in_bytes(n))); if (TaggedStackInterpreter) { ! movptr(tag, Address(rsp, Interpreter::expr_tag_offset_in_bytes(n))); } } void InterpreterMacroAssembler::store_ptr_and_tag(int n, Register val, Register tag) { ! movptr(Address(rsp, Interpreter::expr_offset_in_bytes(n)), val); if (TaggedStackInterpreter) { ! movptr(Address(rsp, Interpreter::expr_tag_offset_in_bytes(n)), tag); } } // Tagged local support void InterpreterMacroAssembler::tag_local(frame::Tag tag, int n) { if (TaggedStackInterpreter) { if (tag == frame::TagCategory2) { ! 
movptr(Address(r14, Interpreter::local_tag_offset_in_bytes(n+1)), ! (int32_t)frame::TagValue); ! movptr(Address(r14, Interpreter::local_tag_offset_in_bytes(n)), ! (int32_t)frame::TagValue); } else { ! movptr(Address(r14, Interpreter::local_tag_offset_in_bytes(n)), (int32_t)tag); } } } void InterpreterMacroAssembler::tag_local(frame::Tag tag, Register idx) { if (TaggedStackInterpreter) { if (tag == frame::TagCategory2) { ! movptr(Address(r14, idx, Address::times_8, ! Interpreter::local_tag_offset_in_bytes(1)), (int32_t)frame::TagValue); ! movptr(Address(r14, idx, Address::times_8, ! Interpreter::local_tag_offset_in_bytes(0)), (int32_t)frame::TagValue); } else { ! movptr(Address(r14, idx, Address::times_8, Interpreter::local_tag_offset_in_bytes(0)), ! (int32_t)tag); } } } void InterpreterMacroAssembler::tag_local(Register tag, Register idx) { if (TaggedStackInterpreter) { // can only be TagValue or TagReference ! movptr(Address(r14, idx, Address::times_8, Interpreter::local_tag_offset_in_bytes(0)), tag); } } void InterpreterMacroAssembler::tag_local(Register tag, int n) { if (TaggedStackInterpreter) { // can only be TagValue or TagReference ! movptr(Address(r14, Interpreter::local_tag_offset_in_bytes(n)), tag); } } #ifdef ASSERT void InterpreterMacroAssembler::verify_local_tag(frame::Tag tag, int n) { if (TaggedStackInterpreter) { frame::Tag t = tag; if (tag == frame::TagCategory2) { Label nbl; t = frame::TagValue; // change to what is stored in locals ! cmpptr(Address(r14, Interpreter::local_tag_offset_in_bytes(n+1)), (int32_t)t); jcc(Assembler::equal, nbl); stop("Local tag is bad for long/double"); bind(nbl); } Label notBad; ! cmpq(Address(r14, Interpreter::local_tag_offset_in_bytes(n)), (int32_t)t); jcc(Assembler::equal, notBad); // Also compare if the local value is zero, then the tag might // not have been set coming from deopt. ! cmpptr(Address(r14, Interpreter::local_offset_in_bytes(n)), 0); jcc(Assembler::equal, notBad); stop("Local tag is bad"); bind(notBad); } }
*** 517,537 **** if (TaggedStackInterpreter) { frame::Tag t = tag; if (tag == frame::TagCategory2) { Label nbl; t = frame::TagValue; // change to what is stored in locals ! cmpq(Address(r14, idx, Address::times_8, Interpreter::local_tag_offset_in_bytes(1)), (int)t); jcc(Assembler::equal, nbl); stop("Local tag is bad for long/double"); bind(nbl); } Label notBad; ! cmpq(Address(r14, idx, Address::times_8, Interpreter::local_tag_offset_in_bytes(0)), (int)t); jcc(Assembler::equal, notBad); // Also compare if the local value is zero, then the tag might // not have been set coming from deopt. ! cmpq(Address(r14, idx, Address::times_8, Interpreter::local_offset_in_bytes(0)), 0); jcc(Assembler::equal, notBad); stop("Local tag is bad"); bind(notBad); } } --- 532,552 ---- if (TaggedStackInterpreter) { frame::Tag t = tag; if (tag == frame::TagCategory2) { Label nbl; t = frame::TagValue; // change to what is stored in locals ! cmpptr(Address(r14, idx, Address::times_8, Interpreter::local_tag_offset_in_bytes(1)), (int32_t)t); jcc(Assembler::equal, nbl); stop("Local tag is bad for long/double"); bind(nbl); } Label notBad; ! cmpptr(Address(r14, idx, Address::times_8, Interpreter::local_tag_offset_in_bytes(0)), (int32_t)t); jcc(Assembler::equal, notBad); // Also compare if the local value is zero, then the tag might // not have been set coming from deopt. ! cmpptr(Address(r14, idx, Address::times_8, Interpreter::local_offset_in_bytes(0)), 0); jcc(Assembler::equal, notBad); stop("Local tag is bad"); bind(notBad); } }
*** 544,554 **** void InterpreterMacroAssembler::super_call_VM_leaf(address entry_point, Register arg_1) { if (c_rarg0 != arg_1) { ! movq(c_rarg0, arg_1); } MacroAssembler::call_VM_leaf_base(entry_point, 1); } --- 559,569 ---- void InterpreterMacroAssembler::super_call_VM_leaf(address entry_point, Register arg_1) { if (c_rarg0 != arg_1) { ! mov(c_rarg0, arg_1); } MacroAssembler::call_VM_leaf_base(entry_point, 1); }
*** 556,569 **** Register arg_1, Register arg_2) { assert(c_rarg0 != arg_2, "smashed argument"); assert(c_rarg1 != arg_1, "smashed argument"); if (c_rarg0 != arg_1) { ! movq(c_rarg0, arg_1); } if (c_rarg1 != arg_2) { ! movq(c_rarg1, arg_2); } MacroAssembler::call_VM_leaf_base(entry_point, 2); } void InterpreterMacroAssembler::super_call_VM_leaf(address entry_point, --- 571,584 ---- Register arg_1, Register arg_2) { assert(c_rarg0 != arg_2, "smashed argument"); assert(c_rarg1 != arg_1, "smashed argument"); if (c_rarg0 != arg_1) { ! mov(c_rarg0, arg_1); } if (c_rarg1 != arg_2) { ! mov(c_rarg1, arg_2); } MacroAssembler::call_VM_leaf_base(entry_point, 2); } void InterpreterMacroAssembler::super_call_VM_leaf(address entry_point,
*** 575,602 **** assert(c_rarg1 != arg_1, "smashed argument"); assert(c_rarg1 != arg_3, "smashed argument"); assert(c_rarg2 != arg_1, "smashed argument"); assert(c_rarg2 != arg_2, "smashed argument"); if (c_rarg0 != arg_1) { ! movq(c_rarg0, arg_1); } if (c_rarg1 != arg_2) { ! movq(c_rarg1, arg_2); } if (c_rarg2 != arg_3) { ! movq(c_rarg2, arg_3); } MacroAssembler::call_VM_leaf_base(entry_point, 3); } // Jump to from_interpreted entry of a call unless single stepping is possible // in this thread in which case we must call the i2i entry void InterpreterMacroAssembler::jump_from_interpreted(Register method, Register temp) { // set sender sp ! leaq(r13, Address(rsp, wordSize)); // record last_sp ! movq(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), r13); if (JvmtiExport::can_post_interpreter_events()) { Label run_compiled_code; // JVMTI events, such as single-stepping, are implemented partly by avoiding running // compiled code in threads for which the event is enabled. Check here for --- 590,617 ---- assert(c_rarg1 != arg_1, "smashed argument"); assert(c_rarg1 != arg_3, "smashed argument"); assert(c_rarg2 != arg_1, "smashed argument"); assert(c_rarg2 != arg_2, "smashed argument"); if (c_rarg0 != arg_1) { ! mov(c_rarg0, arg_1); } if (c_rarg1 != arg_2) { ! mov(c_rarg1, arg_2); } if (c_rarg2 != arg_3) { ! mov(c_rarg2, arg_3); } MacroAssembler::call_VM_leaf_base(entry_point, 3); } // Jump to from_interpreted entry of a call unless single stepping is possible // in this thread in which case we must call the i2i entry void InterpreterMacroAssembler::jump_from_interpreted(Register method, Register temp) { // set sender sp ! lea(r13, Address(rsp, wordSize)); // record last_sp ! movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), r13); if (JvmtiExport::can_post_interpreter_events()) { Label run_compiled_code; // JVMTI events, such as single-stepping, are implemented partly by avoiding running // compiled code in threads for which the event is enabled. Check here for
*** 629,644 **** address* table, bool verifyoop) { verify_FPU(1, state); if (VerifyActivationFrameSize) { Label L; ! movq(rcx, rbp); ! subq(rcx, rsp); ! int min_frame_size = (frame::link_offset - frame::interpreter_frame_initial_sp_offset) * wordSize; ! cmpq(rcx, min_frame_size); jcc(Assembler::greaterEqual, L); stop("broken stack frame"); bind(L); } if (verifyoop) { --- 644,659 ---- address* table, bool verifyoop) { verify_FPU(1, state); if (VerifyActivationFrameSize) { Label L; ! mov(rcx, rbp); ! subptr(rcx, rsp); ! int32_t min_frame_size = (frame::link_offset - frame::interpreter_frame_initial_sp_offset) * wordSize; ! cmpptr(rcx, (int32_t)min_frame_size); jcc(Assembler::greaterEqual, L); stop("broken stack frame"); bind(L); } if (verifyoop) {
*** 663,673 **** void InterpreterMacroAssembler::dispatch_next(TosState state, int step) { // load next bytecode (load before advancing r13 to prevent AGI) load_unsigned_byte(rbx, Address(r13, step)); // advance r13 ! incrementq(r13, step); dispatch_base(state, Interpreter::dispatch_table(state)); } void InterpreterMacroAssembler::dispatch_via(TosState state, address* table) { // load current bytecode --- 678,688 ---- void InterpreterMacroAssembler::dispatch_next(TosState state, int step) { // load next bytecode (load before advancing r13 to prevent AGI) load_unsigned_byte(rbx, Address(r13, step)); // advance r13 ! increment(r13, step); dispatch_base(state, Interpreter::dispatch_table(state)); } void InterpreterMacroAssembler::dispatch_via(TosState state, address* table) { // load current bytecode
*** 703,713 **** in_bytes(JavaThread::do_not_unlock_if_synchronized_offset())); movbool(rdx, do_not_unlock_if_synchronized); movbool(do_not_unlock_if_synchronized, false); // reset the flag // get method access flags ! movq(rbx, Address(rbp, frame::interpreter_frame_method_offset * wordSize)); movl(rcx, Address(rbx, methodOopDesc::access_flags_offset())); testl(rcx, JVM_ACC_SYNCHRONIZED); jcc(Assembler::zero, unlocked); // Don't unlock anything if the _do_not_unlock_if_synchronized flag --- 718,728 ---- in_bytes(JavaThread::do_not_unlock_if_synchronized_offset())); movbool(rdx, do_not_unlock_if_synchronized); movbool(do_not_unlock_if_synchronized, false); // reset the flag // get method access flags ! movptr(rbx, Address(rbp, frame::interpreter_frame_method_offset * wordSize)); movl(rcx, Address(rbx, methodOopDesc::access_flags_offset())); testl(rcx, JVM_ACC_SYNCHRONIZED); jcc(Assembler::zero, unlocked); // Don't unlock anything if the _do_not_unlock_if_synchronized flag
*** 723,736 **** // not been unlocked by an explicit monitorexit bytecode. const Address monitor(rbp, frame::interpreter_frame_initial_sp_offset * wordSize - (int) sizeof(BasicObjectLock)); // We use c_rarg1 so that if we go slow path it will be the correct // register for unlock_object to pass to VM directly ! leaq(c_rarg1, monitor); // address of first monitor ! movq(rax, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes())); ! testq(rax, rax); jcc(Assembler::notZero, unlock); pop(state); if (throw_monitor_exception) { // Entry already unlocked, need to throw exception --- 738,751 ---- // not been unlocked by an explicit monitorexit bytecode. const Address monitor(rbp, frame::interpreter_frame_initial_sp_offset * wordSize - (int) sizeof(BasicObjectLock)); // We use c_rarg1 so that if we go slow path it will be the correct // register for unlock_object to pass to VM directly ! lea(c_rarg1, monitor); // address of first monitor ! movptr(rax, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes())); ! testptr(rax, rax); jcc(Assembler::notZero, unlock); pop(state); if (throw_monitor_exception) { // Entry already unlocked, need to throw exception
*** 768,780 **** rbp, frame::interpreter_frame_initial_sp_offset * wordSize); bind(restart); // We use c_rarg1 so that if we go slow path it will be the correct // register for unlock_object to pass to VM directly ! movq(c_rarg1, monitor_block_top); // points to current entry, starting // with top-most entry ! leaq(rbx, monitor_block_bot); // points to word before bottom of // monitor block jmp(entry); // Entry already locked, need to throw exception bind(exception); --- 783,795 ---- rbp, frame::interpreter_frame_initial_sp_offset * wordSize); bind(restart); // We use c_rarg1 so that if we go slow path it will be the correct // register for unlock_object to pass to VM directly ! movptr(c_rarg1, monitor_block_top); // points to current entry, starting // with top-most entry ! lea(rbx, monitor_block_bot); // points to word before bottom of // monitor block jmp(entry); // Entry already locked, need to throw exception bind(exception);
*** 803,818 **** jmp(restart); } bind(loop); // check if current entry is used ! cmpq(Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()), (int) NULL); jcc(Assembler::notEqual, exception); ! addq(c_rarg1, entry_size); // otherwise advance to next entry bind(entry); ! cmpq(c_rarg1, rbx); // check if bottom reached jcc(Assembler::notEqual, loop); // if not at bottom then check this entry } bind(no_unlock); --- 818,833 ---- jmp(restart); } bind(loop); // check if current entry is used ! cmpptr(Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()), (int32_t) NULL); jcc(Assembler::notEqual, exception); ! addptr(c_rarg1, entry_size); // otherwise advance to next entry bind(entry); ! cmpptr(c_rarg1, rbx); // check if bottom reached jcc(Assembler::notEqual, loop); // if not at bottom then check this entry } bind(no_unlock);
*** 823,839 **** notify_method_exit(state, SkipNotifyJVMTI); // preserve TOSCA } // remove activation // get sender sp ! movq(rbx, Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize)); leave(); // remove frame anchor ! popq(ret_addr); // get return address ! movq(rsp, rbx); // set sp to sender sp } // Lock object // // Args: // c_rarg1: BasicObjectLock to be used for locking // --- 838,856 ---- notify_method_exit(state, SkipNotifyJVMTI); // preserve TOSCA } // remove activation // get sender sp ! movptr(rbx, Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize)); leave(); // remove frame anchor ! pop(ret_addr); // get return address ! mov(rsp, rbx); // set sp to sender sp } + #endif // C_INTERP + // Lock object // // Args: // c_rarg1: BasicObjectLock to be used for locking //
*** 860,889 **** BasicLock::displaced_header_offset_in_bytes(); Label slow_case; // Load object pointer into obj_reg %c_rarg3 ! movq(obj_reg, Address(lock_reg, obj_offset)); if (UseBiasedLocking) { biased_locking_enter(lock_reg, obj_reg, swap_reg, rscratch1, false, done, &slow_case); } // Load immediate 1 into swap_reg %rax movl(swap_reg, 1); // Load (object->mark() | 1) into swap_reg %rax ! orq(swap_reg, Address(obj_reg, 0)); // Save (object->mark() | 1) into BasicLock's displaced header ! movq(Address(lock_reg, mark_offset), swap_reg); assert(lock_offset == 0, "displached header must be first word in BasicObjectLock"); if (os::is_MP()) lock(); ! cmpxchgq(lock_reg, Address(obj_reg, 0)); if (PrintBiasedLockingStatistics) { cond_inc32(Assembler::zero, ExternalAddress((address) BiasedLocking::fast_path_entry_count_addr())); } jcc(Assembler::zero, done); --- 877,906 ---- BasicLock::displaced_header_offset_in_bytes(); Label slow_case; // Load object pointer into obj_reg %c_rarg3 ! movptr(obj_reg, Address(lock_reg, obj_offset)); if (UseBiasedLocking) { biased_locking_enter(lock_reg, obj_reg, swap_reg, rscratch1, false, done, &slow_case); } // Load immediate 1 into swap_reg %rax movl(swap_reg, 1); // Load (object->mark() | 1) into swap_reg %rax ! orptr(swap_reg, Address(obj_reg, 0)); // Save (object->mark() | 1) into BasicLock's displaced header ! movptr(Address(lock_reg, mark_offset), swap_reg); assert(lock_offset == 0, "displached header must be first word in BasicObjectLock"); if (os::is_MP()) lock(); ! cmpxchgptr(lock_reg, Address(obj_reg, 0)); if (PrintBiasedLockingStatistics) { cond_inc32(Assembler::zero, ExternalAddress((address) BiasedLocking::fast_path_entry_count_addr())); } jcc(Assembler::zero, done);
*** 895,909 **** // These 3 tests can be done by evaluating the following // expression: ((mark - rsp) & (7 - os::vm_page_size())), // assuming both stack pointer and pagesize have their // least significant 3 bits clear. // NOTE: the oopMark is in swap_reg %rax as the result of cmpxchg ! subq(swap_reg, rsp); ! andq(swap_reg, 7 - os::vm_page_size()); // Save the test result, for recursive case, the result is zero ! movq(Address(lock_reg, mark_offset), swap_reg); if (PrintBiasedLockingStatistics) { cond_inc32(Assembler::zero, ExternalAddress((address) BiasedLocking::fast_path_entry_count_addr())); } --- 912,926 ---- // These 3 tests can be done by evaluating the following // expression: ((mark - rsp) & (7 - os::vm_page_size())), // assuming both stack pointer and pagesize have their // least significant 3 bits clear. // NOTE: the oopMark is in swap_reg %rax as the result of cmpxchg ! subptr(swap_reg, rsp); ! andptr(swap_reg, 7 - os::vm_page_size()); // Save the test result, for recursive case, the result is zero ! movptr(Address(lock_reg, mark_offset), swap_reg); if (PrintBiasedLockingStatistics) { cond_inc32(Assembler::zero, ExternalAddress((address) BiasedLocking::fast_path_entry_count_addr())); }
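Editor's note: the recursive-locking fast path above folds its checks into one expression, ((mark - rsp) & (7 - os::vm_page_size())): the low three bits of the mark must be clear and the mark must lie within one page above the current stack pointer. A minimal sketch of that test, assuming the page size is a power of two and rsp is 8-byte aligned:

    #include <cstdint>

    static bool looks_like_own_stack_lock(uintptr_t mark, uintptr_t rsp,
                                          uintptr_t page_size) {
      // Zero exactly when (mark & 7) == 0 and rsp <= mark < rsp + page_size;
      // unsigned wrap-around makes the single mask-and-test equivalent.
      return ((mark - rsp) & (7 - page_size)) == 0;
    }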
*** 948,988 **** save_bcp(); // Save in case of exception // Convert from BasicObjectLock structure to object and BasicLock // structure Store the BasicLock address into %rax ! leaq(swap_reg, Address(lock_reg, BasicObjectLock::lock_offset_in_bytes())); // Load oop into obj_reg(%c_rarg3) ! movq(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset_in_bytes())); // Free entry ! movptr(Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()), NULL_WORD); if (UseBiasedLocking) { biased_locking_exit(obj_reg, header_reg, done); } // Load the old header from BasicLock structure ! movq(header_reg, Address(swap_reg, BasicLock::displaced_header_offset_in_bytes())); // Test for recursion ! testq(header_reg, header_reg); // zero for recursive case jcc(Assembler::zero, done); // Atomic swap back the old header if (os::is_MP()) lock(); ! cmpxchgq(header_reg, Address(obj_reg, 0)); // zero for recursive case jcc(Assembler::zero, done); // Call the runtime routine for slow case. ! movq(Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()), obj_reg); // restore obj call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg); --- 965,1005 ---- save_bcp(); // Save in case of exception // Convert from BasicObjectLock structure to object and BasicLock // structure Store the BasicLock address into %rax ! lea(swap_reg, Address(lock_reg, BasicObjectLock::lock_offset_in_bytes())); // Load oop into obj_reg(%c_rarg3) ! movptr(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset_in_bytes())); // Free entry ! movptr(Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()), (int32_t)NULL_WORD); if (UseBiasedLocking) { biased_locking_exit(obj_reg, header_reg, done); } // Load the old header from BasicLock structure ! movptr(header_reg, Address(swap_reg, BasicLock::displaced_header_offset_in_bytes())); // Test for recursion ! testptr(header_reg, header_reg); // zero for recursive case jcc(Assembler::zero, done); // Atomic swap back the old header if (os::is_MP()) lock(); ! cmpxchgptr(header_reg, Address(obj_reg, 0)); // zero for recursive case jcc(Assembler::zero, done); // Call the runtime routine for slow case. ! movptr(Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()), obj_reg); // restore obj call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg);
*** 990,1078 **** restore_bcp(); } } void InterpreterMacroAssembler::test_method_data_pointer(Register mdp, Label& zero_continue) { assert(ProfileInterpreter, "must be profiling interpreter"); ! movq(mdp, Address(rbp, frame::interpreter_frame_mdx_offset * wordSize)); ! testq(mdp, mdp); jcc(Assembler::zero, zero_continue); } // Set the method data pointer for the current bcp. void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() { assert(ProfileInterpreter, "must be profiling interpreter"); Label zero_continue; ! pushq(rax); ! pushq(rbx); get_method(rbx); // Test MDO to avoid the call if it is NULL. ! movq(rax, Address(rbx, in_bytes(methodOopDesc::method_data_offset()))); ! testq(rax, rax); jcc(Assembler::zero, zero_continue); // rbx: method // r13: bcp call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::bcp_to_di), rbx, r13); // rax: mdi ! movq(rbx, Address(rbx, in_bytes(methodOopDesc::method_data_offset()))); ! testq(rbx, rbx); jcc(Assembler::zero, zero_continue); ! addq(rbx, in_bytes(methodDataOopDesc::data_offset())); ! addq(rbx, rax); ! movq(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), rbx); bind(zero_continue); ! popq(rbx); ! popq(rax); } void InterpreterMacroAssembler::verify_method_data_pointer() { assert(ProfileInterpreter, "must be profiling interpreter"); #ifdef ASSERT Label verify_continue; ! pushq(rax); ! pushq(rbx); ! pushq(c_rarg3); ! pushq(c_rarg2); test_method_data_pointer(c_rarg3, verify_continue); // If mdp is zero, continue get_method(rbx); // If the mdp is valid, it will point to a DataLayout header which is // consistent with the bcp. The converse is highly probable also. load_unsigned_word(c_rarg2, Address(c_rarg3, in_bytes(DataLayout::bci_offset()))); ! addq(c_rarg2, Address(rbx, methodOopDesc::const_offset())); ! leaq(c_rarg2, Address(c_rarg2, constMethodOopDesc::codes_offset())); ! cmpq(c_rarg2, r13); jcc(Assembler::equal, verify_continue); // rbx: method // r13: bcp // c_rarg3: mdp call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::verify_mdp), rbx, r13, c_rarg3); bind(verify_continue); ! popq(c_rarg2); ! popq(c_rarg3); ! popq(rbx); ! popq(rax); #endif // ASSERT } void InterpreterMacroAssembler::set_mdp_data_at(Register mdp_in, int constant, Register value) { assert(ProfileInterpreter, "must be profiling interpreter"); Address data(mdp_in, constant); ! movq(data, value); } void InterpreterMacroAssembler::increment_mdp_data_at(Register mdp_in, int constant, --- 1007,1096 ---- restore_bcp(); } } + #ifndef CC_INTERP void InterpreterMacroAssembler::test_method_data_pointer(Register mdp, Label& zero_continue) { assert(ProfileInterpreter, "must be profiling interpreter"); ! movptr(mdp, Address(rbp, frame::interpreter_frame_mdx_offset * wordSize)); ! testptr(mdp, mdp); jcc(Assembler::zero, zero_continue); } // Set the method data pointer for the current bcp. void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() { assert(ProfileInterpreter, "must be profiling interpreter"); Label zero_continue; ! push(rax); ! push(rbx); get_method(rbx); // Test MDO to avoid the call if it is NULL. ! movptr(rax, Address(rbx, in_bytes(methodOopDesc::method_data_offset()))); ! testptr(rax, rax); jcc(Assembler::zero, zero_continue); // rbx: method // r13: bcp call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::bcp_to_di), rbx, r13); // rax: mdi ! movptr(rbx, Address(rbx, in_bytes(methodOopDesc::method_data_offset()))); ! testptr(rbx, rbx); jcc(Assembler::zero, zero_continue); ! 
addptr(rbx, in_bytes(methodDataOopDesc::data_offset())); ! addptr(rbx, rax); ! movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), rbx); bind(zero_continue); ! pop(rbx); ! pop(rax); } void InterpreterMacroAssembler::verify_method_data_pointer() { assert(ProfileInterpreter, "must be profiling interpreter"); #ifdef ASSERT Label verify_continue; ! push(rax); ! push(rbx); ! push(c_rarg3); ! push(c_rarg2); test_method_data_pointer(c_rarg3, verify_continue); // If mdp is zero, continue get_method(rbx); // If the mdp is valid, it will point to a DataLayout header which is // consistent with the bcp. The converse is highly probable also. load_unsigned_word(c_rarg2, Address(c_rarg3, in_bytes(DataLayout::bci_offset()))); ! addptr(c_rarg2, Address(rbx, methodOopDesc::const_offset())); ! lea(c_rarg2, Address(c_rarg2, constMethodOopDesc::codes_offset())); ! cmpptr(c_rarg2, r13); jcc(Assembler::equal, verify_continue); // rbx: method // r13: bcp // c_rarg3: mdp call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::verify_mdp), rbx, r13, c_rarg3); bind(verify_continue); ! pop(c_rarg2); ! pop(c_rarg3); ! pop(rbx); ! pop(rax); #endif // ASSERT } void InterpreterMacroAssembler::set_mdp_data_at(Register mdp_in, int constant, Register value) { assert(ProfileInterpreter, "must be profiling interpreter"); Address data(mdp_in, constant); ! movptr(data, value); } void InterpreterMacroAssembler::increment_mdp_data_at(Register mdp_in, int constant,
*** 1084,1109 **** } void InterpreterMacroAssembler::increment_mdp_data_at(Address data, bool decrement) { assert(ProfileInterpreter, "must be profiling interpreter"); if (decrement) { // Decrement the register. Set condition codes. ! addq(data, -DataLayout::counter_increment); // If the decrement causes the counter to overflow, stay negative Label L; jcc(Assembler::negative, L); ! addq(data, DataLayout::counter_increment); bind(L); } else { assert(DataLayout::counter_increment == 1, "flow-free idiom only works with 1"); // Increment the register. Set carry flag. ! addq(data, DataLayout::counter_increment); // If the increment causes the counter to overflow, pull back by 1. ! sbbq(data, 0); } } void InterpreterMacroAssembler::increment_mdp_data_at(Register mdp_in, --- 1102,1129 ---- } void InterpreterMacroAssembler::increment_mdp_data_at(Address data, bool decrement) { assert(ProfileInterpreter, "must be profiling interpreter"); + // %%% this does 64bit counters at best it is wasting space + // at worst it is a rare bug when counters overflow if (decrement) { // Decrement the register. Set condition codes. ! addptr(data, (int32_t) -DataLayout::counter_increment); // If the decrement causes the counter to overflow, stay negative Label L; jcc(Assembler::negative, L); ! addptr(data, (int32_t) DataLayout::counter_increment); bind(L); } else { assert(DataLayout::counter_increment == 1, "flow-free idiom only works with 1"); // Increment the register. Set carry flag. ! addptr(data, DataLayout::counter_increment); // If the increment causes the counter to overflow, pull back by 1. ! sbbptr(data, (int32_t)0); } } void InterpreterMacroAssembler::increment_mdp_data_at(Register mdp_in,
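Editor's note: the increment path above relies on the carry flag to keep a profile counter from wrapping: addptr adds one and sbbptr then subtracts the carry, so a counter that would overflow sticks at its maximum instead of rolling over to zero. An equivalent sketch of that flow-free idiom:

    #include <cstdint>

    static uint64_t saturating_increment(uint64_t counter) {
      uint64_t incremented = counter + 1;           // addptr data, 1 (carry set on wrap)
      uint64_t carry = (incremented == 0) ? 1 : 0;  // carry out of the add
      return incremented - carry;                   // sbbptr data, 0 (pull back by one)
    }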
*** 1131,1184 **** Register value, Register test_value_out, Label& not_equal_continue) { assert(ProfileInterpreter, "must be profiling interpreter"); if (test_value_out == noreg) { ! cmpq(value, Address(mdp_in, offset)); } else { // Put the test value into a register, so caller can use it: ! movq(test_value_out, Address(mdp_in, offset)); ! cmpq(test_value_out, value); } jcc(Assembler::notEqual, not_equal_continue); } void InterpreterMacroAssembler::update_mdp_by_offset(Register mdp_in, int offset_of_disp) { assert(ProfileInterpreter, "must be profiling interpreter"); Address disp_address(mdp_in, offset_of_disp); ! addq(mdp_in, disp_address); ! movq(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), mdp_in); } void InterpreterMacroAssembler::update_mdp_by_offset(Register mdp_in, Register reg, int offset_of_disp) { assert(ProfileInterpreter, "must be profiling interpreter"); Address disp_address(mdp_in, reg, Address::times_1, offset_of_disp); ! addq(mdp_in, disp_address); ! movq(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), mdp_in); } void InterpreterMacroAssembler::update_mdp_by_constant(Register mdp_in, int constant) { assert(ProfileInterpreter, "must be profiling interpreter"); ! addq(mdp_in, constant); ! movq(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), mdp_in); } void InterpreterMacroAssembler::update_mdp_for_ret(Register return_bci) { assert(ProfileInterpreter, "must be profiling interpreter"); ! pushq(return_bci); // save/restore across call_VM call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::update_mdp_for_ret), return_bci); ! popq(return_bci); } void InterpreterMacroAssembler::profile_taken_branch(Register mdp, Register bumped_count) { --- 1151,1204 ---- Register value, Register test_value_out, Label& not_equal_continue) { assert(ProfileInterpreter, "must be profiling interpreter"); if (test_value_out == noreg) { ! cmpptr(value, Address(mdp_in, offset)); } else { // Put the test value into a register, so caller can use it: ! movptr(test_value_out, Address(mdp_in, offset)); ! cmpptr(test_value_out, value); } jcc(Assembler::notEqual, not_equal_continue); } void InterpreterMacroAssembler::update_mdp_by_offset(Register mdp_in, int offset_of_disp) { assert(ProfileInterpreter, "must be profiling interpreter"); Address disp_address(mdp_in, offset_of_disp); ! addptr(mdp_in, disp_address); ! movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), mdp_in); } void InterpreterMacroAssembler::update_mdp_by_offset(Register mdp_in, Register reg, int offset_of_disp) { assert(ProfileInterpreter, "must be profiling interpreter"); Address disp_address(mdp_in, reg, Address::times_1, offset_of_disp); ! addptr(mdp_in, disp_address); ! movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), mdp_in); } void InterpreterMacroAssembler::update_mdp_by_constant(Register mdp_in, int constant) { assert(ProfileInterpreter, "must be profiling interpreter"); ! addptr(mdp_in, constant); ! movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), mdp_in); } void InterpreterMacroAssembler::update_mdp_for_ret(Register return_bci) { assert(ProfileInterpreter, "must be profiling interpreter"); ! push(return_bci); // save/restore across call_VM call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::update_mdp_for_ret), return_bci); ! pop(return_bci); } void InterpreterMacroAssembler::profile_taken_branch(Register mdp, Register bumped_count) {
*** 1191,1206 **** // We are taking a branch. Increment the taken count. // We inline increment_mdp_data_at to return bumped_count in a register //increment_mdp_data_at(mdp, in_bytes(JumpData::taken_offset())); Address data(mdp, in_bytes(JumpData::taken_offset())); ! movq(bumped_count, data); assert(DataLayout::counter_increment == 1, "flow-free idiom only works with 1"); ! addq(bumped_count, DataLayout::counter_increment); ! sbbq(bumped_count, 0); ! movq(data, bumped_count); // Store back out // The method data pointer needs to be updated to reflect the new target. update_mdp_by_offset(mdp, in_bytes(JumpData::displacement_offset())); bind(profile_continue); } --- 1211,1226 ---- // We are taking a branch. Increment the taken count. // We inline increment_mdp_data_at to return bumped_count in a register //increment_mdp_data_at(mdp, in_bytes(JumpData::taken_offset())); Address data(mdp, in_bytes(JumpData::taken_offset())); ! movptr(bumped_count, data); assert(DataLayout::counter_increment == 1, "flow-free idiom only works with 1"); ! addptr(bumped_count, DataLayout::counter_increment); ! sbbptr(bumped_count, 0); ! movptr(data, bumped_count); // Store back out // The method data pointer needs to be updated to reflect the new target. update_mdp_by_offset(mdp, in_bytes(JumpData::displacement_offset())); bind(profile_continue); }
*** 1324,1334 **** jmp(done); bind(next_test); if (test_for_null_also) { // Failed the equality check on receiver[n]... Test for null. ! testq(reg2, reg2); if (start_row == last_row) { // The only thing left to do is handle the null case. jcc(Assembler::notZero, done); break; } --- 1344,1354 ---- jmp(done); bind(next_test); if (test_for_null_also) { // Failed the equality check on receiver[n]... Test for null. ! testptr(reg2, reg2); if (start_row == last_row) { // The only thing left to do is handle the null case. jcc(Assembler::notZero, done); break; }
*** 1520,1531 **** test_method_data_pointer(mdp, profile_continue); // Build the base (index * per_case_size_in_bytes()) + // case_array_offset_in_bytes() movl(reg2, in_bytes(MultiBranchData::per_case_size())); ! imulq(index, reg2); // XXX l ? ! addq(index, in_bytes(MultiBranchData::case_array_offset())); // XXX l ? // Update the case count increment_mdp_data_at(mdp, index, in_bytes(MultiBranchData::relative_count_offset())); --- 1540,1551 ---- test_method_data_pointer(mdp, profile_continue); // Build the base (index * per_case_size_in_bytes()) + // case_array_offset_in_bytes() movl(reg2, in_bytes(MultiBranchData::per_case_size())); ! imulptr(index, reg2); // XXX l ? ! addptr(index, in_bytes(MultiBranchData::case_array_offset())); // XXX l ? // Update the case count increment_mdp_data_at(mdp, index, in_bytes(MultiBranchData::relative_count_offset()));
*** 1539,1556 **** --- 1559,1578 ---- bind(profile_continue); } } + void InterpreterMacroAssembler::verify_oop(Register reg, TosState state) { if (state == atos) { MacroAssembler::verify_oop(reg); } } void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) { } + #endif // !CC_INTERP void InterpreterMacroAssembler::notify_method_entry() { // Whenever JVMTI is interp_only_mode, method entry/exit events are sent to // track stack depth. If it is possible to enter interp_only_mode we add
*** 1569,1578 **** --- 1591,1608 ---- SkipIfEqual skip(this, &DTraceMethodProbes, false); get_method(c_rarg1); call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry), r15_thread, c_rarg1); } + + // RedefineClasses() tracing support for obsolete method entry + if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) { + get_method(c_rarg1); + call_VM_leaf( + CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry), + r15_thread, c_rarg1); + } } void InterpreterMacroAssembler::notify_method_exit( TosState state, NotifyMethodExitMode mode) {
*** 1583,1606 **** Label L; // Note: frame::interpreter_frame_result has a dependency on how the // method result is saved across the call to post_method_exit. If this // is changed then the interpreter_frame_result implementation will // need to be updated too. ! push(state); movl(rdx, Address(r15_thread, JavaThread::interp_only_mode_offset())); testl(rdx, rdx); jcc(Assembler::zero, L); call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_exit)); bind(L); ! pop(state); } { SkipIfEqual skip(this, &DTraceMethodProbes, false); ! push(state); get_method(c_rarg1); call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), r15_thread, c_rarg1); ! pop(state); } } --- 1613,1639 ---- Label L; // Note: frame::interpreter_frame_result has a dependency on how the // method result is saved across the call to post_method_exit. If this // is changed then the interpreter_frame_result implementation will // need to be updated too. ! ! // For c++ interpreter the result is always stored at a known location in the frame ! // template interpreter will leave it on the top of the stack. ! NOT_CC_INTERP(push(state);) movl(rdx, Address(r15_thread, JavaThread::interp_only_mode_offset())); testl(rdx, rdx); jcc(Assembler::zero, L); call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_exit)); bind(L); ! NOT_CC_INTERP(pop(state)); } { SkipIfEqual skip(this, &DTraceMethodProbes, false); ! NOT_CC_INTERP(push(state)); get_method(c_rarg1); call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), r15_thread, c_rarg1); ! NOT_CC_INTERP(pop(state)); } }