src/cpu/sparc/vm/interp_masm_sparc.cpp
8074457 Cdiff src/cpu/sparc/vm/interp_masm_sparc.cpp

*** 37,47 ****
  #include "runtime/basicLock.hpp"
  #include "runtime/biasedLocking.hpp"
  #include "runtime/sharedRuntime.hpp"
  #include "runtime/thread.inline.hpp"
  
- #ifndef CC_INTERP
  #ifndef FAST_DISPATCH
  #define FAST_DISPATCH 1
  #endif
  #undef FAST_DISPATCH
--- 37,46 ----
*** 50,66 ****
  // This file specializes the assembler with interpreter-specific macros
  
  const Address InterpreterMacroAssembler::l_tmp(FP, (frame::interpreter_frame_l_scratch_fp_offset * wordSize) + STACK_BIAS);
  const Address InterpreterMacroAssembler::d_tmp(FP, (frame::interpreter_frame_d_scratch_fp_offset * wordSize) + STACK_BIAS);
  
- #else // CC_INTERP
- #ifndef STATE
- #define STATE(field_name) Lstate, in_bytes(byte_offset_of(BytecodeInterpreter, field_name))
- #endif // STATE
- 
- #endif // CC_INTERP
- 
  void InterpreterMacroAssembler::jump_to_entry(address entry) {
    assert(entry, "Entry must have been generated by now");
    AddressLiteral al(entry);
    jump_to(al, G3_scratch);
    delayed()->nop();
--- 49,58 ----
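For context on the removed STATE macro: it expanded to the (base register, byte offset) pair the SPARC assembler uses to address a field of the C++ interpreter's BytecodeInterpreter frame, with Lstate holding the state pointer and byte_offset_of supplying the field offset. A minimal stand-alone C++ sketch of the offset half follows; the struct is a hypothetical stand-in, and byte_offset_of is treated here as behaving like offsetof.

#include <cstddef>

// Hypothetical stand-in for the C++ interpreter's per-frame state; the real
// BytecodeInterpreter has many more fields.
struct BytecodeInterpreterSketch {
  double _native_fresult;   // FP result parked across a VM call
  long   _native_lresult;   // integral result parked across a VM call
};

// STATE(_native_lresult) effectively paired the Lstate register with this value:
const size_t native_lresult_offset = offsetof(BytecodeInterpreterSketch, _native_lresult);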
*** 80,91 ****
    bind(skip_move);
    round_to(delta, WordsPerLong);       // make multiple of 2 (SP must be 2-word aligned)
    sll(delta, LogBytesPerWord, delta);  // extra space for locals in bytes
  }
  
- #ifndef CC_INTERP
- 
  // Dispatch code executed in the prolog of a bytecode which does not do its
  // own dispatch. The dispatch address is computed and placed in IdispatchAddress
  void InterpreterMacroAssembler::dispatch_prolog(TosState state, int bcp_incr) {
    assert_not_delayed();
  #ifdef FAST_DISPATCH
--- 72,81 ----
*** 263,276 ****
  void InterpreterMacroAssembler::super_call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2) {
    mov(arg_1, O0);
    mov(arg_2, O1);
    MacroAssembler::call_VM_leaf_base(thread_cache, entry_point, 2);
  }
  
- #endif /* CC_INTERP */
- 
- 
- #ifndef CC_INTERP
  void InterpreterMacroAssembler::dispatch_base(TosState state, address* table) {
    assert_not_delayed();
    dispatch_Lbyte_code(state, table);
  }
--- 253,262 ----
*** 1187,1198 ****
      or3 (Otos_l2->after_save(), G1, G1);
    }
  #endif /* COMPILER2 */
  }
  
- #endif /* CC_INTERP */
- 
  
  // Lock object
  //
  // Argument - lock_reg points to the BasicObjectLock to be used for locking,
  // it must be initialized with the object to lock
--- 1173,1182 ----
*** 1321,1332 ****
      bind(done);
    }
  }
  
- #ifndef CC_INTERP
- 
  // Get the method data pointer from the Method* and set the
  // specified register to its value.
  
  void InterpreterMacroAssembler::set_method_data_pointer() {
    assert(ProfileInterpreter, "must be profiling interpreter");
--- 1305,1314 ----
*** 2364,2375 ****
  void InterpreterMacroAssembler::compute_stack_base( Register Rdest ) {
    add( Lesp, wordSize, Rdest );
  }
  
- #endif /* CC_INTERP */
- 
  void InterpreterMacroAssembler::get_method_counters(Register method,
                                                      Register Rcounters,
                                                      Label& skip) {
    Label has_counters;
    Address method_counters(method, in_bytes(Method::method_counters_offset()));
--- 2346,2355 ----
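The get_method_counters helper whose opening lines appear in this hunk loads the MethodCounters hanging off the Method (via method_counters_offset) and takes a caller-supplied skip label for the case where no counters can be obtained. A self-contained C++ model of that control flow, under those assumptions; the Model types are hypothetical and the runtime allocation is stubbed with calloc:

#include <cstdlib>

struct MethodCountersModel { unsigned invocation_count; unsigned backedge_count; };
struct MethodModel { MethodCountersModel* _method_counters; };  // slot addressed via method_counters_offset()

// Returning null models "take the skip label": the caller must not bump any counter.
static MethodCountersModel* get_method_counters_model(MethodModel* m) {
  if (m->_method_counters != nullptr) {
    return m->_method_counters;                       // fast path: "has_counters"
  }
  // Slow path stands in for the runtime call that allocates the counters lazily.
  m->_method_counters =
      static_cast<MethodCountersModel*>(calloc(1, sizeof(MethodCountersModel)));
  return m->_method_counters;                          // may still be null -> skip
}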
*** 2441,2451 ****
    add( Rtmp, Rtmp2, Rtmp );
  
    // Note that this macro must leave backedge_count + invocation_count in Rtmp!
  }
  
- #ifndef CC_INTERP
  void InterpreterMacroAssembler::test_backedge_count_for_osr( Register backedge_count,
                                                               Register method_counters,
                                                               Register branch_bcp,
                                                               Register Rtmp ) {
    Label did_not_overflow;
--- 2421,2430 ----
*** 2579,2589 ****
    ld(mask_addr, scratch2);
    andcc(scratch1, scratch2, G0);
    br(cond, false, Assembler::pn, *where);
    delayed()->st(scratch1, counter_addr);
  }
- #endif /* CC_INTERP */
  
  // Inline assembly for:
  //
  // if (thread is in interp_only_mode) {
  //   InterpreterRuntime::post_method_entry();
--- 2558,2567 ----
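The sequence at the top of this hunk (load the mask, andcc it against the freshly incremented counter, conditional branch with the counter store in the delay slot) is the usual "bump a counter, test the masked bits, branch on overflow" idiom used for invocation and backedge profiling. A rough, self-contained C++ model of that idiom; this is a paraphrase under the assumption that the branch fires when the masked bits are zero, whereas the real helper parameterizes the branch condition:

#include <cstdint>

// 'incremented' plays the role of scratch1 (counter after the bump),
// 'mask' the value loaded from mask_addr into scratch2.
static bool counter_overflowed(uint32_t incremented, uint32_t mask) {
  return (incremented & mask) == 0;   // andcc(scratch1, scratch2, G0) + branch on zero
}

static bool bump_and_check(uint32_t* counter_addr, uint32_t increment, uint32_t mask) {
  uint32_t v = *counter_addr + increment;
  *counter_addr = v;                  // delayed()->st(scratch1, counter_addr)
  return counter_overflowed(v, mask);
}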
*** 2595,2606 ****
  //   SharedRuntime::rc_trace_method_entry(method, receiver);
  // }
  
  void InterpreterMacroAssembler::notify_method_entry() {
  
- // C++ interpreter only uses this for native methods.
- 
    // Whenever JVMTI puts a thread in interp_only_mode, method
    // entry/exit events are sent for that thread to track stack
    // depth. If it is possible to enter interp_only_mode we add
    // the code to check if the event should be sent.
    if (JvmtiExport::can_post_interpreter_events()) {
--- 2573,2582 ----
*** 2645,2655 ****
  // Java methods have their result stored in the expression stack
  
  void InterpreterMacroAssembler::notify_method_exit(bool is_native_method,
                                                     TosState state,
                                                     NotifyMethodExitMode mode) {
- // C++ interpreter only uses this for native methods.
  
    // Whenever JVMTI puts a thread in interp_only_mode, method
    // entry/exit events are sent for that thread to track stack
    // depth. If it is possible to enter interp_only_mode we add
    // the code to check if the event should be sent.
--- 2621,2630 ----
*** 2685,2732 ****
      restore_return_value(state, is_native_method);
    }
  }
  
  void InterpreterMacroAssembler::save_return_value(TosState state, bool is_native_call) {
- #ifdef CC_INTERP
-   // result potentially in O0/O1: save it across calls
-   stf(FloatRegisterImpl::D, F0, STATE(_native_fresult));
- #ifdef _LP64
-   stx(O0, STATE(_native_lresult));
- #else
-   std(O0, STATE(_native_lresult));
- #endif
- #else // CC_INTERP
    if (is_native_call) {
      stf(FloatRegisterImpl::D, F0, d_tmp);
  #ifdef _LP64
      stx(O0, l_tmp);
  #else
      std(O0, l_tmp);
  #endif
    } else {
      push(state);
    }
- #endif // CC_INTERP
  }
  
  void InterpreterMacroAssembler::restore_return_value( TosState state, bool is_native_call) {
- #ifdef CC_INTERP
-   ldf(FloatRegisterImpl::D, STATE(_native_fresult), F0);
- #ifdef _LP64
-   ldx(STATE(_native_lresult), O0);
- #else
-   ldd(STATE(_native_lresult), O0);
- #endif
- #else // CC_INTERP
    if (is_native_call) {
      ldf(FloatRegisterImpl::D, d_tmp, F0);
  #ifdef _LP64
      ldx(l_tmp, O0);
  #else
      ldd(l_tmp, O0);
  #endif
    } else {
      pop(state);
    }
- #endif // CC_INTERP
  }
--- 2660,2688 ----
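For readability, here is the shape the two helpers take once the CC_INTERP branches above are removed, reassembled from the surviving context lines of this hunk rather than compiled independently: native results are parked in the interpreter frame's d_tmp/l_tmp scratch slots across the VM call, while Java results simply ride the expression stack.

void InterpreterMacroAssembler::save_return_value(TosState state, bool is_native_call) {
  if (is_native_call) {
    // Native results arrive in F0 (float/double) and O0 (O0/O1 on 32-bit):
    // park them in the frame scratch slots across the call.
    stf(FloatRegisterImpl::D, F0, d_tmp);
#ifdef _LP64
    stx(O0, l_tmp);
#else
    std(O0, l_tmp);
#endif
  } else {
    // Java results live on the expression stack: push according to tos state.
    push(state);
  }
}

void InterpreterMacroAssembler::restore_return_value(TosState state, bool is_native_call) {
  if (is_native_call) {
    ldf(FloatRegisterImpl::D, d_tmp, F0);
#ifdef _LP64
    ldx(l_tmp, O0);
#else
    ldd(l_tmp, O0);
#endif
  } else {
    pop(state);
  }
}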