src/cpu/sparc/vm/templateInterpreter_sparc.cpp
*** old/src/cpu/sparc/vm/templateInterpreter_sparc.cpp	Fri Jul 15 19:09:50 2011
--- new/src/cpu/sparc/vm/templateInterpreter_sparc.cpp	Fri Jul 15 19:09:50 2011

*** 188,198 **** --- 188,198 ----
    Label L_got_cache, L_giant_index;
    const Register cache = G3_scratch;
    const Register size = G1_scratch;
    if (EnableInvokeDynamic) {
      __ ldub(Address(Lbcp, 0), G1_scratch);  // Load current bytecode.
!     __ cmp_and_br(G1_scratch, Bytecodes::_invokedynamic, Assembler::equal, false, Assembler::pn, L_giant_index);
!     __ cmp_and_br_short(G1_scratch, Bytecodes::_invokedynamic, Assembler::equal, Assembler::pn, L_giant_index);
    }
    __ get_cache_and_index_at_bcp(cache, G1_scratch, 1);
    __ bind(L_got_cache);
    __ ld_ptr(cache, constantPoolCacheOopDesc::base_offset() +
                     ConstantPoolCacheEntry::flags_offset(), size);
*** 203,213 **** --- 203,213 ----
    // out of the main line of code...
    if (EnableInvokeDynamic) {
      __ bind(L_giant_index);
      __ get_cache_and_index_at_bcp(cache, G1_scratch, 1, sizeof(u4));
!     __ ba_short(L_got_cache);
    }

    return entry;
  }
*** 216,226 **** --- 216,226 ----
    address entry = __ pc();
    __ get_constant_pool_cache(LcpoolCache); // load LcpoolCache
    { Label L;
      Address exception_addr(G2_thread, Thread::pending_exception_offset());
      __ ld_ptr(exception_addr, Gtemp);  // Load pending exception.
!     __ br_null(Gtemp, false, Assembler::pt, L);
!     __ br_null_short(Gtemp, Assembler::pt, L);
      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception));
      __ should_not_reach_here();
      __ bind(L);
    }
    __ dispatch_next(state, step);
*** 297,315 **** --- 297,315 ----
      const int mask = ((1 << Tier0InvokeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
      Label no_mdo, done;
      if (ProfileInterpreter) {
        // If no method data exists, go to profile_continue.
        __ ld_ptr(Lmethod, methodOopDesc::method_data_offset(), G4_scratch);
!       __ br_null(G4_scratch, false, Assembler::pn, no_mdo);
!       __ br_null_short(G4_scratch, Assembler::pn, no_mdo);
        // Increment counter
        Address mdo_invocation_counter(G4_scratch,
                                       in_bytes(methodDataOopDesc::invocation_counter_offset()) +
                                       in_bytes(InvocationCounter::counter_offset()));
        __ increment_mask_and_jump(mdo_invocation_counter, increment, mask,
                                   G3_scratch, Lscratch, Assembler::zero, overflow);
!       __ ba_short(done);
      }

      // Increment counter in methodOop
      __ bind(no_mdo);
      Address invocation_counter(Lmethod,
*** 331,341 **** --- 331,341 ----
      if (ProfileInterpreter && profile_method != NULL) {
        // Test to see if we should create a method data oop
        AddressLiteral profile_limit((address)&InvocationCounter::InterpreterProfileLimit);
        __ load_contents(profile_limit, G3_scratch);
!       __ cmp_and_br(O0, G3_scratch, Assembler::lessUnsigned, false, Assembler::pn, *profile_method_continue);
!       __ cmp_and_br_short(O0, G3_scratch, Assembler::lessUnsigned, Assembler::pn, *profile_method_continue);

        // if no method data exists, go to profile_method
        __ test_method_data_pointer(*profile_method);
      }
*** 400,426 **** --- 400,426 ----
    Label after_frame_check;

    assert_different_registers(Rframe_size, Rscratch, Rscratch2);

    __ set(page_size, Rscratch);
!   __ cmp_and_br(Rframe_size, Rscratch, Assembler::lessEqual, false, Assembler::pt, after_frame_check);
!   __ cmp_and_br_short(Rframe_size, Rscratch, Assembler::lessEqual, Assembler::pt, after_frame_check);

    // get the stack base, and in debug, verify it is non-zero
    __ ld_ptr( G2_thread, Thread::stack_base_offset(), Rscratch );
#ifdef ASSERT
    Label base_not_zero;
!   __ br_notnull(Rscratch, false, Assembler::pn, base_not_zero);
!   __ br_notnull_short(Rscratch, Assembler::pn, base_not_zero);
    __ stop("stack base is zero in generate_stack_overflow_check");
    __ bind(base_not_zero);
#endif

    // get the stack size, and in debug, verify it is non-zero
    assert( sizeof(size_t) == sizeof(intptr_t), "wrong load size" );
    __ ld_ptr( G2_thread, Thread::stack_size_offset(), Rscratch2 );
#ifdef ASSERT
    Label size_not_zero;
!   __ br_notnull(Rscratch2, false, Assembler::pn, size_not_zero);
!   __ br_notnull_short(Rscratch2, Assembler::pn, size_not_zero);
    __ stop("stack size is zero in generate_stack_overflow_check");
    __ bind(size_not_zero);
#endif

    // compute the beginning of the protected zone minus the requested frame size
*** 432,442 **** --- 432,442 ----
    // SP, which would take another register
    __ add( Rscratch, Rframe_size, Rscratch );

    // the frame is greater than one page in size, so check against
    // the bottom of the stack
!   __ cmp_and_brx(SP, Rscratch, Assembler::greater, false, Assembler::pt, after_frame_check);
!   __ cmp_and_brx_short(SP, Rscratch, Assembler::greater, Assembler::pt, after_frame_check);

    // Save the return address as the exception pc
    __ st_ptr(O7, saved_exception_pc);

    // the stack will overflow, throw an exception
*** 604,614 **** --- 604,614 ----
    // do nothing for empty methods (do not even increment invocation counter)
    if ( UseFastEmptyMethods) {
      // If we need a safepoint check, generate full interpreter entry.
      AddressLiteral sync_state(SafepointSynchronize::address_of_state());
      __ set(sync_state, G3_scratch);
!     __ cmp_and_br(G3_scratch, SafepointSynchronize::_not_synchronized, Assembler::notEqual, false, Assembler::pn, slow_path);
!     __ cmp_and_br_short(G3_scratch, SafepointSynchronize::_not_synchronized, Assembler::notEqual, Assembler::pn, slow_path);

      // Code: _return
      __ retl();
      __ delayed()->mov(O5_savedSP, SP);
*** 642,657 **** --- 642,657 ----
    // Check if we need to reach a safepoint and generate full interpreter
    // frame if so.
    AddressLiteral sync_state(SafepointSynchronize::address_of_state());
    __ load_contents(sync_state, G3_scratch);
    __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);
!   __ cmp_and_br(G3_scratch, SafepointSynchronize::_not_synchronized, Assembler::notEqual, false, Assembler::pn, slow_path);
!   __ cmp_and_br_short(G3_scratch, SafepointSynchronize::_not_synchronized, Assembler::notEqual, Assembler::pn, slow_path);

    // Check if local 0 != NULL
    __ ld_ptr(Gargs, G0, Otos_i ); // get local 0
    // check if local 0 == NULL and go the slow path
!   __ br_null(Otos_i, false, Assembler::pn, slow_path);
!   __ br_null_short(Otos_i, Assembler::pn, slow_path);

    // read first instruction word and extract bytecode @ 1 and index @ 2
    // get first 4 bytes of the bytecodes (big endian!)
    __ ld_ptr(G5_method, methodOopDesc::const_offset(), G1_scratch);
*** 673,683 **** --- 673,683 ----
      // If not, need the slow path.
      ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
      __ ld_ptr(G3_scratch, cp_base_offset + ConstantPoolCacheEntry::indices_offset(), G1_scratch);
      __ srl(G1_scratch, 2*BitsPerByte, G1_scratch);
      __ and3(G1_scratch, 0xFF, G1_scratch);
!     __ cmp_and_br(G1_scratch, Bytecodes::_getfield, Assembler::notEqual, false, Assembler::pn, slow_path);
!     __ cmp_and_br_short(G1_scratch, Bytecodes::_getfield, Assembler::notEqual, Assembler::pn, slow_path);

      // Get the type and return field offset from the constant pool cache
      __ ld_ptr(G3_scratch, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), G1_scratch);
      __ ld_ptr(G3_scratch, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), G3_scratch);
*** 761,773 **** --- 761,772 ----
    // continue and the thread will safepoint at the next bytecode dispatch.

    // Check if local 0 != NULL
    // If the receiver is null then it is OK to jump to the slow path.
    __ ld_ptr(Gargs, G0, Otos_i ); // get local 0
-   __ tst(Otos_i);
    // check if local 0 == NULL and go the slow path
!   __ brx(Assembler::zero, false, Assembler::pn, slow_path);
!   __ delayed()->nop();
!   __ cmp_and_brx_short(Otos_i, 0, Assembler::equal, Assembler::pn, slow_path);

    // Load the value of the referent field.
    if (Assembler::is_simm13(referent_offset)) {
      __ load_heap_oop(Otos_i, referent_offset, Otos_i);
*** 926,936 **** --- 925,935 ----
    // get signature handler
    { Label L;
      Address signature_handler(Lmethod, methodOopDesc::signature_handler_offset());
      __ ld_ptr(signature_handler, G3_scratch);
!     __ br_notnull(G3_scratch, false, Assembler::pt, L);
!     __ br_notnull_short(G3_scratch, Assembler::pt, L);
      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), Lmethod);
      __ ld_ptr(signature_handler, G3_scratch);
      __ bind(L);
    }
*** 991,1001 **** --- 990,1000 ----
      __ ld_ptr(O1, constantPoolOopDesc::pool_holder_offset_in_bytes(), O1);
      __ ld_ptr(O1, mirror_offset, O1);
#ifdef ASSERT
      if (!PrintSignatureHandlers)  // do not dirty the output with this
      { Label L;
!       __ br_notnull(O1, false, Assembler::pt, L);
!       __ br_notnull_short(O1, Assembler::pt, L);
        __ stop("mirror is missing");
        __ bind(L);
      }
#endif // ASSERT
      __ st_ptr(O1, Lscratch2, 0);
*** 1008,1018 **** --- 1007,1017 ----
    // Oops are boxed in-place on the stack, with handles copied to arguments.
    // The result handler is in Lscratch.  O0 will shortly hold the JNIEnv*.

#ifdef ASSERT
    { Label L;
!     __ br_notnull(O0, false, Assembler::pt, L);
!     __ br_notnull_short(O0, Assembler::pt, L);
      __ stop("native entry point is missing");
      __ bind(L);
    }
#endif // ASSERT
*** 1047,1057 **** --- 1046,1056 ----
    Address thread_state(G2_thread, JavaThread::thread_state_offset());
#ifdef ASSERT
    { Label L;
      __ ld(thread_state, G3_scratch);
!     __ cmp_and_br(G3_scratch, _thread_in_Java, Assembler::equal, false, Assembler::pt, L);
!     __ cmp_and_br_short(G3_scratch, _thread_in_Java, Assembler::equal, Assembler::pt, L);
      __ stop("Wrong thread state in native stub");
      __ bind(L);
    }
#endif // ASSERT

    __ set(_thread_in_native, G3_scratch);
*** 1100,1110 **** --- 1099,1109 ----
      __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);
      Label L;
      __ br(Assembler::notEqual, false, Assembler::pn, L);
      __ delayed()->ld(G2_thread, JavaThread::suspend_flags_offset(), G3_scratch);
!     __ br_zero(G3_scratch, no_block);
!     __ cmp_and_br_short(G3_scratch, 0, Assembler::equal, Assembler::pt, no_block);
      __ bind(L);

      // Block.  Save any potential method result value before the operation and
      // use a leaf call to leave the last_Java_frame setup undisturbed.
      save_native_result();
*** 1149,1159 **** --- 1148,1158 ----
    { Label no_oop, store_result;

      __ set((intptr_t)AbstractInterpreter::result_handler(T_OBJECT), G3_scratch);
!     __ cmp_and_brx(G3_scratch, Lscratch, Assembler::notEqual, false, Assembler::pt, no_oop);
!     __ cmp_and_brx_short(G3_scratch, Lscratch, Assembler::notEqual, Assembler::pt, no_oop);
      __ addcc(G0, O0, O0);
      __ brx(Assembler::notZero, true, Assembler::pt, store_result);     // if result is not NULL:
      __ delayed()->ld_ptr(O0, 0, O0);                                   // unbox it
      __ mov(G0, O0);
*** 1168,1178 **** --- 1167,1177 ----
    // handle exceptions (exception handling will handle unlocking!)
    { Label L;
      Address exception_addr(G2_thread, Thread::pending_exception_offset());
      __ ld_ptr(exception_addr, Gtemp);
!     __ br_null(Gtemp, false, Assembler::pt, L);
!     __ br_null_short(Gtemp, Assembler::pt, L);
      // Note: This could be handled more efficiently since we know that the native
      //       method doesn't have an exception handler. We could directly return
      //       to the exception handler for the caller.
      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception));
      __ should_not_reach_here();
*** 1205,1215 **** --- 1204,1214 ----
    // dispose of return address and remove activation
#ifdef ASSERT
    { Label ok;
!     __ cmp_and_brx(I5_savedSP, FP, Assembler::greaterEqualUnsigned, false, Assembler::pt, ok);
!     __ cmp_and_brx_short(I5_savedSP, FP, Assembler::greaterEqualUnsigned, Assembler::pt, ok);
      __ stop("bad I5_savedSP value");
      __ should_not_reach_here();
      __ bind(ok);
    }
#endif
*** 1387,1397 **** --- 1386,1396 ----
      // We have decided to profile this method in the interpreter
      __ bind(profile_method);

      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
      __ set_method_data_pointer_for_bcp();
!     __ ba_short(profile_method_continue);
    }

    // handle invocation counter overflow
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(Lcontinue);
*** 1813,1823 **** --- 1812,1822 ----
    // Note that we don't compare the return PC against the
    // deoptimization blob's unpack entry because of the presence of
    // adapter frames in C2.
    Label caller_not_deoptimized;
    __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, InterpreterRuntime::interpreter_contains), I7);
!   __ br_notnull(O0, false, Assembler::pt, caller_not_deoptimized);
!   __ br_notnull_short(O0, Assembler::pt, caller_not_deoptimized);

    const Register Gtmp1 = G3_scratch;
    const Register Gtmp2 = G1_scratch;

    // Compute size of arguments for saving when returning to deoptimized caller
*** 1947,1960 **** --- 1946,1959 ----

// Helper for vtos entry point generation

void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t,
                                                         address& bep, address& cep, address& sep,
                                                         address& aep, address& iep, address& lep,
                                                         address& fep, address& dep, address& vep) {
  assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
  Label L;
! aep = __ pc(); __ push_ptr(); __ ba_short(L);
! fep = __ pc(); __ push_f();   __ ba_short(L);
! dep = __ pc(); __ push_d();   __ ba_short(L);
! lep = __ pc(); __ push_l();   __ ba_short(L);
  iep = __ pc(); __ push_i();
  bep = cep = sep = iep;                        // there aren't any
  vep = __ pc(); __ bind(L);                    // fall through
  generate_and_dispatch(t);
}
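
The pattern throughout this change is mechanical: each open-coded
compare/branch/delay-slot sequence is folded into a single *_short
MacroAssembler helper. Below is a minimal sketch of the shape such a helper
takes, built only from the calls visible in this diff; the exact signatures
and any fused compare-and-branch instruction selection in HotSpot's SPARC
assembler are assumptions here, not copied from the assembler sources:

    // Sketch only: assumed shape of the short-branch helpers adopted above.
    void MacroAssembler::cmp_and_br_short(Register s1, int simm13a,
                                          Condition c, Predict p, Label& L) {
      cmp(s1, simm13a);     // set the integer condition codes
      br(c, false, p, L);   // conditional branch, delay slot not annulled
      delayed()->nop();     // fill the delay slot
    }

    // br_null_short/br_notnull_short cover the common null-check cases.
    void MacroAssembler::br_null_short(Register s1, Predict p, Label& L) {
      cmp_and_br_short(s1, 0, Assembler::equal, p, L);
    }

Folding the three instructions into one call removes the easy-to-forget
__ delayed()->nop() line at every branch site and leaves a single place to
emit a fused compare-and-branch on hardware that supports it.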
