
src/cpu/ppc/vm/templateInterpreterGenerator_ppc.cpp

rev 11567 : 8160245: C1: Clean up platform #defines in c1_LIR.hpp.
Summary: Also add fnoreg on x86, LIR_Address constructor without scale, and clean up templateInterpreterGenerator.hpp.


 835   //
 836   // Unlike the C++ interpreter above: Check exceptions!
 837   // Assumption: Caller must set the flag "do_not_unlock_if_synchronized" if the monitor of a sync'ed
 838   // method has not yet been created. Thus, no unlocking of a non-existing monitor can occur.
 839 
 840   __ li(R4_ARG2, 0);
 841   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), R4_ARG2, true);
 842 
 843   // Returns verified_entry_point or NULL.
 844   // We ignore it in any case.
 845   __ b(continue_entry);
 846 }
 847 
 848 // See if we've got enough room on the stack for locals plus overhead below
 849 // JavaThread::stack_overflow_limit(). If not, throw a StackOverflowError
 850 // without going through the signal handler, i.e., reserved and yellow zones
 851 // will not be made usable. The shadow zone must suffice to handle the
 852 // overflow.
 853 //
 854 // Kills Rmem_frame_size, Rscratch1.
 855 void TemplateInterpreterGenerator::generate_stack_overflow_check(Register Rmem_frame_size, Register Rscratch1) {
 856   Label done;
 857   assert_different_registers(Rmem_frame_size, Rscratch1);

 858 
 859   BLOCK_COMMENT("stack_overflow_check_with_compare {");
 860   __ sub(Rmem_frame_size, R1_SP, Rmem_frame_size);
 861   __ ld(Rscratch1, thread_(stack_overflow_limit));
 862   __ cmpld(CCR0/*is_stack_overflow*/, Rmem_frame_size, Rscratch1);
 863   __ bgt(CCR0/*is_stack_overflow*/, done);
 864 
 865   // The stack overflows. Load target address of the runtime stub and call it.
 866   assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "generated in wrong order");
 867   __ load_const_optimized(Rscratch1, (StubRoutines::throw_StackOverflowError_entry()), R0);
 868   __ mtctr(Rscratch1);
 869   // Restore caller_sp.
 870 #ifdef ASSERT
 871   __ ld(Rscratch1, 0, R1_SP);
 872   __ ld(R0, 0, R21_sender_SP);
 873   __ cmpd(CCR0, R0, Rscratch1);
 874   __ asm_assert_eq("backlink", 0x547);
 875 #endif // ASSERT
 876   __ mr(R1_SP, R21_sender_SP);
 877   __ bctr();
 878 
 879   __ align(32, 12);
 880   __ bind(done);
 881   BLOCK_COMMENT("} stack_overflow_check_with_compare");
 882 }
 883 
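The check generated above is, conceptually, the C++ sketch below. Only JavaThread::stack_overflow_limit(), StubRoutines::throw_StackOverflowError_entry() and the registers named in the comments come from the code above; the stand-in names (stack_overflow_limit, throw_stack_overflow_stub, stack_overflow_check) are illustrative only.

#include <cstdint>

// Hypothetical stand-ins for JavaThread::stack_overflow_limit() and the
// StubRoutines::throw_StackOverflowError_entry() runtime stub.
extern uintptr_t stack_overflow_limit;
[[noreturn]] void throw_stack_overflow_stub();

// Minimal sketch of the generated check: compute the prospective SP and
// compare it unsigned (cmpld) against the thread's overflow limit.
void stack_overflow_check(uintptr_t& sp, uintptr_t frame_size, uintptr_t sender_sp) {
  uintptr_t new_sp = sp - frame_size;   // sub(Rmem_frame_size, R1_SP, Rmem_frame_size)
  if (new_sp > stack_overflow_limit) {  // cmpld(CCR0, ...); bgt(CCR0, done)
    return;                             // enough room, fall through to 'done'
  }
  sp = sender_sp;                       // restore caller_sp: mr(R1_SP, R21_sender_SP)
  throw_stack_overflow_stub();          // mtctr(stub entry); bctr()
}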
 884 void TemplateInterpreterGenerator::unlock_method(bool check_exceptions) {
 885   __ unlock_object(R26_monitor, check_exceptions);
 886 }
 887 
 888 // Lock the current method, interpreter register window must be set up!
 889 void TemplateInterpreterGenerator::lock_method(Register Rflags, Register Rscratch1, Register Rscratch2, bool flags_preloaded) {
 890   const Register Robj_to_lock = Rscratch2;
 891 
 892   {
 893     if (!flags_preloaded) {
 894       __ lwz(Rflags, method_(access_flags));
 895     }
 896 
 897 #ifdef ASSERT
 898     // Check if method needs synchronization.
 899     {
 900       Label Lok;
 901       __ testbitdi(CCR0, R0, Rflags, JVM_ACC_SYNCHRONIZED_BIT);
 902       __ btrue(CCR0, Lok);
 903       __ stop("method doesn't need synchronization");
 904       __ bind(Lok);
 905     }
 906 #endif // ASSERT
 907   }


1549   }
1550 
1551   __ reset_last_Java_frame();
1552 
1553   // Jvmdi/jvmpi support. Whether we've got an exception pending or
1554   // not, and whether unlocking throws an exception or not, we notify
1555   // on native method exit. If we do have an exception, we'll end up
1556   // in the caller's context to handle it, so if we don't do the
1557   // notify here, we'll drop it on the floor.
1558   __ notify_method_exit(true/*native method*/,
1559                         ilgl /*illegal state (not used for native methods)*/,
1560                         InterpreterMacroAssembler::NotifyJVMTI,
1561                         false /*check_exceptions*/);
1562 
1563   //=============================================================================
1564   // Handle exceptions
1565 
1566   if (synchronized) {
1567     // Don't check for exceptions since we're still in the i2n frame. Do that
1568     // manually afterwards.
1569     unlock_method(false);

1570   }
1571 
1572   // Reset active handles after returning from native.
1573   // thread->active_handles()->clear();
1574   __ ld(active_handles, thread_(active_handles));
1575   // TODO PPC port assert(4 == JNIHandleBlock::top_size_in_bytes(), "unexpected field size");
1576   __ li(R0, 0);
1577   __ stw(R0, JNIHandleBlock::top_offset_in_bytes(), active_handles);
1578 
1579   Label exception_return_sync_check_already_unlocked;
1580   __ ld(R0/*pending_exception*/, thread_(pending_exception));
1581   __ cmpdi(CCR0, R0/*pending_exception*/, 0);
1582   __ bne(CCR0, exception_return_sync_check_already_unlocked);
1583 
1584   //-----------------------------------------------------------------------------
1585   // No exception pending.
1586 
1587   // Move native method result back into proper registers and return.
1588   // Invoke result handler (may unbox/promote).
1589   __ ld(R11_scratch1, 0, R1_SP);


1592   __ call_stub(result_handler_addr);
1593 
1594   __ merge_frames(/*top_frame_sp*/ R21_sender_SP, /*return_pc*/ R0, R11_scratch1, R12_scratch2);
1595 
1596   // Must use the return pc which was loaded from the caller's frame
1597   // as the VM uses return-pc-patching for deoptimization.
1598   __ mtlr(R0);
1599   __ blr();
1600 
1601   //-----------------------------------------------------------------------------
1602   // An exception is pending. We call into the runtime only if the
1603   // caller was not interpreted. If it was interpreted the
1604   // interpreter will do the correct thing. If it isn't interpreted
1605   // (call stub/compiled code) we will change our return and continue.
1606 
1607   BIND(exception_return_sync_check);
1608 
1609   if (synchronized) {
1610     // Don't check for exceptions since we're still in the i2n frame. Do that
1611     // manually afterwards.
1612     unlock_method(false);
1613   }
1614   BIND(exception_return_sync_check_already_unlocked);
1615 
1616   const Register return_pc = R31;
1617 
1618   __ ld(return_pc, 0, R1_SP);
1619   __ ld(return_pc, _abi(lr), return_pc);
1620 
1621   // Get the address of the exception handler.
1622   __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address),
1623                   R16_thread,
1624                   return_pc /* return pc */);
1625   __ merge_frames(/*top_frame_sp*/ R21_sender_SP, noreg, R11_scratch1, R12_scratch2);
1626 
 1627   // Load the PC of the exception handler into LR.
1628   __ mtlr(R3_RET);
1629 
1630   // Load exception into R3_ARG1 and clear pending exception in thread.
1631   __ ld(R3_ARG1/*exception*/, thread_(pending_exception));
1632   __ li(R4_ARG2, 0);
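In C++ terms, the exception path above does roughly the following. Only SharedRuntime::exception_handler_for_return_address and the thread's pending_exception field appear in the generated code; ThreadSketch and prepare_exception_return are illustrative names, and the store that actually clears the field follows in code not shown in this hunk.

// Illustrative stand-in for JavaThread and for the runtime leaf call used above.
struct ThreadSketch { void* pending_exception; };
void* exception_handler_for_return_address(ThreadSketch* thread, void* return_pc);

// Sketch of the exception-return path: look up the handler for the caller's
// return pc, hand over the pending exception, and clear it in the thread.
void* prepare_exception_return(ThreadSketch* thread, void* return_pc, void** exception_out) {
  void* handler = exception_handler_for_return_address(thread, return_pc); // call_VM_leaf(...)
  *exception_out = thread->pending_exception;  // ld(R3_ARG1, thread_(pending_exception))
  thread->pending_exception = nullptr;         // li(R4_ARG2, 0); store follows this hunk
  return handler;                              // mtlr(R3_RET); the later blr continues there
}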




 835   //
 836   // Unlike the C++ interpreter above: Check exceptions!
 837   // Assumption: Caller must set the flag "do_not_unlock_if_synchronized" if the monitor of a sync'ed
 838   // method has not yet been created. Thus, no unlocking of a non-existing monitor can occur.
 839 
 840   __ li(R4_ARG2, 0);
 841   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), R4_ARG2, true);
 842 
 843   // Returns verified_entry_point or NULL.
 844   // We ignore it in any case.
 845   __ b(continue_entry);
 846 }
 847 
 848 // See if we've got enough room on the stack for locals plus overhead below
 849 // JavaThread::stack_overflow_limit(). If not, throw a StackOverflowError
 850 // without going through the signal handler, i.e., reserved and yellow zones
 851 // will not be made usable. The shadow zone must suffice to handle the
 852 // overflow.
 853 //
 854 // Kills Rmem_frame_size, Rscratch1.
 855 void TemplateInterpreterGenerator::generate_stack_overflow_check(Register Rmem_frame_size, Register Rscratch1, Register unused) {
 856   Label done;
 857   assert_different_registers(Rmem_frame_size, Rscratch1);
 858   assert(unused == noreg, "not needed on ppc");
 859 
 860   BLOCK_COMMENT("stack_overflow_check_with_compare {");
 861   __ sub(Rmem_frame_size, R1_SP, Rmem_frame_size);
 862   __ ld(Rscratch1, thread_(stack_overflow_limit));
 863   __ cmpld(CCR0/*is_stack_overflow*/, Rmem_frame_size, Rscratch1);
 864   __ bgt(CCR0/*is_stack_overflow*/, done);
 865 
 866   // The stack overflows. Load target address of the runtime stub and call it.
 867   assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "generated in wrong order");
 868   __ load_const_optimized(Rscratch1, (StubRoutines::throw_StackOverflowError_entry()), R0);
 869   __ mtctr(Rscratch1);
 870   // Restore caller_sp.
 871 #ifdef ASSERT
 872   __ ld(Rscratch1, 0, R1_SP);
 873   __ ld(R0, 0, R21_sender_SP);
 874   __ cmpd(CCR0, R0, Rscratch1);
 875   __ asm_assert_eq("backlink", 0x547);
 876 #endif // ASSERT
 877   __ mr(R1_SP, R21_sender_SP);
 878   __ bctr();
 879 
 880   __ align(32, 12);
 881   __ bind(done);
 882   BLOCK_COMMENT("} stack_overflow_check_with_compare");
 883 }
 884 




 885 // Lock the current method, interpreter register window must be set up!
 886 void TemplateInterpreterGenerator::lock_method(Register Rflags, Register Rscratch1, Register Rscratch2, bool flags_preloaded) {
 887   const Register Robj_to_lock = Rscratch2;
 888 
 889   {
 890     if (!flags_preloaded) {
 891       __ lwz(Rflags, method_(access_flags));
 892     }
 893 
 894 #ifdef ASSERT
 895     // Check if method needs synchronization.
 896     {
 897       Label Lok;
 898       __ testbitdi(CCR0, R0, Rflags, JVM_ACC_SYNCHRONIZED_BIT);
 899       __ btrue(CCR0, Lok);
 900       __ stop("method doesn't need synchronization");
 901       __ bind(Lok);
 902     }
 903 #endif // ASSERT
 904   }


1546   }
1547 
1548   __ reset_last_Java_frame();
1549 
1550   // Jvmdi/jvmpi support. Whether we've got an exception pending or
1551   // not, and whether unlocking throws an exception or not, we notify
1552   // on native method exit. If we do have an exception, we'll end up
1553   // in the caller's context to handle it, so if we don't do the
1554   // notify here, we'll drop it on the floor.
1555   __ notify_method_exit(true/*native method*/,
1556                         ilgl /*illegal state (not used for native methods)*/,
1557                         InterpreterMacroAssembler::NotifyJVMTI,
1558                         false /*check_exceptions*/);
1559 
1560   //=============================================================================
1561   // Handle exceptions
1562 
1563   if (synchronized) {
1564     // Don't check for exceptions since we're still in the i2n frame. Do that
1565     // manually afterwards.
1566     __ unlock_object(R26_monitor, false); // Can also unlock methods.
1567 
1568   }
1569 
1570   // Reset active handles after returning from native.
1571   // thread->active_handles()->clear();
1572   __ ld(active_handles, thread_(active_handles));
1573   // TODO PPC port assert(4 == JNIHandleBlock::top_size_in_bytes(), "unexpected field size");
1574   __ li(R0, 0);
1575   __ stw(R0, JNIHandleBlock::top_offset_in_bytes(), active_handles);
1576 
1577   Label exception_return_sync_check_already_unlocked;
1578   __ ld(R0/*pending_exception*/, thread_(pending_exception));
1579   __ cmpdi(CCR0, R0/*pending_exception*/, 0);
1580   __ bne(CCR0, exception_return_sync_check_already_unlocked);
1581 
1582   //-----------------------------------------------------------------------------
1583   // No exception pending.
1584 
1585   // Move native method result back into proper registers and return.
1586   // Invoke result handler (may unbox/promote).
1587   __ ld(R11_scratch1, 0, R1_SP);


1590   __ call_stub(result_handler_addr);
1591 
1592   __ merge_frames(/*top_frame_sp*/ R21_sender_SP, /*return_pc*/ R0, R11_scratch1, R12_scratch2);
1593 
1594   // Must use the return pc which was loaded from the caller's frame
1595   // as the VM uses return-pc-patching for deoptimization.
1596   __ mtlr(R0);
1597   __ blr();
1598 
1599   //-----------------------------------------------------------------------------
1600   // An exception is pending. We call into the runtime only if the
1601   // caller was not interpreted. If it was interpreted the
1602   // interpreter will do the correct thing. If it isn't interpreted
1603   // (call stub/compiled code) we will change our return and continue.
1604 
1605   BIND(exception_return_sync_check);
1606 
1607   if (synchronized) {
1608     // Don't check for exceptions since we're still in the i2n frame. Do that
1609     // manually afterwards.
1610     __ unlock_object(R26_monitor, false); // Can also unlock methods.
1611   }
1612   BIND(exception_return_sync_check_already_unlocked);
1613 
1614   const Register return_pc = R31;
1615 
1616   __ ld(return_pc, 0, R1_SP);
1617   __ ld(return_pc, _abi(lr), return_pc);
1618 
1619   // Get the address of the exception handler.
1620   __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address),
1621                   R16_thread,
1622                   return_pc /* return pc */);
1623   __ merge_frames(/*top_frame_sp*/ R21_sender_SP, noreg, R11_scratch1, R12_scratch2);
1624 
 1625   // Load the PC of the exception handler into LR.
1626   __ mtlr(R3_RET);
1627 
1628   // Load exception into R3_ARG1 and clear pending exception in thread.
1629   __ ld(R3_ARG1/*exception*/, thread_(pending_exception));
1630   __ li(R4_ARG2, 0);

