src/cpu/ppc/vm/templateInterpreterGenerator_ppc.cpp

New version (after this change), shown as excerpts:

  // The stack overflows. Load target address of the runtime stub and call it.
  assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "generated in wrong order");
  __ load_const_optimized(Rscratch1, (StubRoutines::throw_StackOverflowError_entry()), R0);
  __ mtctr(Rscratch1);
  // Restore caller_sp.
#ifdef ASSERT
  __ ld(Rscratch1, 0, R1_SP);
  __ ld(R0, 0, R21_sender_SP);
  __ cmpd(CCR0, R0, Rscratch1);
  __ asm_assert_eq("backlink", 0x547);
#endif // ASSERT
  __ mr(R1_SP, R21_sender_SP);
  __ bctr();

  __ align(32, 12);
  __ bind(done);
  BLOCK_COMMENT("} stack_overflow_check_with_compare");
}
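A note on the #ifdef ASSERT block above: in the PPC64 ABI the word at the stack pointer is the frame's back link, i.e. the caller's SP, so comparing the word at R1_SP against R21_sender_SP validates the frame chain just before "mr R1_SP, R21_sender_SP" discards the frame and bctr tail-calls the stub. A minimal C++ model of the asserted invariant (types here are illustrative, not HotSpot's):

  #include <cassert>

  // Illustrative model only: on PPC64 the first word of a frame is the
  // back link, a pointer to the caller's frame (the caller's SP).
  struct Frame {
    Frame* back_link;  // what "__ ld(Rscratch1, 0, R1_SP)" loads
  };

  // Model of the ASSERT block: before the frame is discarded, the stored
  // back link must already equal the sender SP we are about to install.
  void assert_backlink(Frame* sp, Frame* sender_sp) {
    assert(sp->back_link == sender_sp && "backlink");  // asm_assert_eq("backlink", 0x547)
  }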

void TemplateInterpreterGenerator::unlock_method(bool check_exceptions) {
  __ unlock_object(R26_monitor, check_exceptions);
}
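This unlock_method(bool check_exceptions) wrapper is the substance of the change: it forwards to unlock_object on R26_monitor (the interpreter register that, per the PPC port's register convention, addresses the current method's monitor slot), and both native-exit call sites further down now use it instead of open-coding the unlock. As it appears at a call site after the change:

  if (synchronized) {
    // Don't check for exceptions since we're still in the i2n frame. Do that
    // manually afterwards.
    unlock_method(false);
  }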

// Lock the current method, interpreter register window must be set up!
void TemplateInterpreterGenerator::lock_method(Register Rflags, Register Rscratch1, Register Rscratch2, bool flags_preloaded) {
  const Register Robj_to_lock = Rscratch2;

  {
    if (!flags_preloaded) {
      __ lwz(Rflags, method_(access_flags));
    }

#ifdef ASSERT
    // Check if the method needs synchronization.
    {
      Label Lok;
      __ testbitdi(CCR0, R0, Rflags, JVM_ACC_SYNCHRONIZED_BIT);
      __ btrue(CCR0, Lok);
      __ stop("method doesn't need synchronization");
      __ bind(Lok);
    }
#endif // ASSERT
  }
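The debug check above tests a single bit of the method's access flags. A C-level model of the same check (JVM_ACC_SYNCHRONIZED is the standard 0x0020 flag bit from the class-file format; the helper itself is illustrative, not HotSpot code):

  #include <cassert>
  #include <cstdint>

  const uint32_t JVM_ACC_SYNCHRONIZED = 0x0020;  // the bit testbitdi/btrue examine

  // Model of the ASSERT block: lock_method must only be reached for
  // methods that actually declare synchronization.
  void check_method_needs_sync(uint32_t access_flags) {
    assert((access_flags & JVM_ACC_SYNCHRONIZED) != 0 &&
           "method doesn't need synchronization");  // __ stop(...) in the emitted code
  }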


  }

  __ reset_last_Java_frame();

  // JVMDI/JVMPI support. Whether we've got an exception pending or
  // not, and whether unlocking throws an exception or not, we notify
  // on native method exit. If we do have an exception, we'll end up
  // in the caller's context to handle it, so if we don't do the
  // notify here, we'll drop it on the floor.
  __ notify_method_exit(true/*native method*/,
                        ilgl /*illegal state (not used for native methods)*/,
                        InterpreterMacroAssembler::NotifyJVMTI,
                        false /*check_exceptions*/);

  //=============================================================================
  // Handle exceptions

  if (synchronized) {
    // Don't check for exceptions since we're still in the i2n frame. Do that
    // manually afterwards.
    unlock_method(false);
  }

  // Reset active handles after returning from native.
  // thread->active_handles()->clear();
  __ ld(active_handles, thread_(active_handles));
  // TODO PPC port assert(4 == JNIHandleBlock::top_size_in_bytes(), "unexpected field size");
  __ li(R0, 0);
  __ stw(R0, JNIHandleBlock::top_offset_in_bytes(), active_handles);
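The li/stw pair implements the clear() noted in the comment: it zeroes the 4-byte top field of the thread's active JNIHandleBlock, dropping all JNI handles the native call created. A hedged sketch (the struct below is illustrative; HotSpot's real class has more fields):

  // Illustrative model of the handle-block reset (not HotSpot's real layout).
  struct JNIHandleBlock {
    int top;  // index of the next free handle; 4 bytes, matching the 32-bit stw
  };

  void reset_active_handles(JNIHandleBlock* active_handles) {
    active_handles->top = 0;  // thread->active_handles()->clear();
  }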

  Label exception_return_sync_check_already_unlocked;
  __ ld(R0/*pending_exception*/, thread_(pending_exception));
  __ cmpdi(CCR0, R0/*pending_exception*/, 0);
  __ bne(CCR0, exception_return_sync_check_already_unlocked);

  //-----------------------------------------------------------------------------
  // No exception pending.

  // Move native method result back into proper registers and return.
  // Invoke result handler (may unbox/promote).
  __ ld(R11_scratch1, 0, R1_SP);


  __ call_stub(result_handler_addr);

  __ merge_frames(/*top_frame_sp*/ R21_sender_SP, /*return_pc*/ R0, R11_scratch1, R12_scratch2);

  // Must use the return pc which was loaded from the caller's frame
  // as the VM uses return-pc-patching for deoptimization.
  __ mtlr(R0);
  __ blr();
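Why reload the return PC from the caller's frame rather than trusting a value cached in a register? As the comment says, the VM patches the return-PC slot for deoptimization, so the return must go through the stored (possibly patched) value. A C-level model (illustrative, not HotSpot code):

  // The return PC lives in a frame slot that deoptimization may overwrite
  // with a deopt-blob entry point; returning must honor the patched value.
  struct CallerFrame {
    void* return_pc;  // slot the VM may patch
  };

  void* pick_return_address(CallerFrame* caller) {
    return caller->return_pc;  // corresponds to the ld above; mtlr(R0); blr()
  }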

  //-----------------------------------------------------------------------------
  // An exception is pending. We call into the runtime only if the
  // caller was not interpreted. If it was interpreted the
  // interpreter will do the correct thing. If it isn't interpreted
  // (call stub/compiled code) we will change our return and continue.

  BIND(exception_return_sync_check);

  if (synchronized) {
    // Don't check for exceptions since we're still in the i2n frame. Do that
    // manually afterwards.
    unlock_method(false);
  }
  BIND(exception_return_sync_check_already_unlocked);

  const Register return_pc = R31;

  __ ld(return_pc, 0, R1_SP);
  __ ld(return_pc, _abi(lr), return_pc);

  // Get the address of the exception handler.
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address),
                  R16_thread,
                  return_pc /* return pc */);
  __ merge_frames(/*top_frame_sp*/ R21_sender_SP, noreg, R11_scratch1, R12_scratch2);

  // Load the PC of the exception handler into LR.
  __ mtlr(R3_RET);

  // Load exception into R3_ARG1 and clear pending exception in thread.
  __ ld(R3_ARG1/*exception*/, thread_(pending_exception));
  __ li(R4_ARG2, 0);
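To close the loop on this path: exception_handler_for_return_address returns (in R3_RET) the continuation address for the given return PC, and the excerpt ends just as the code loads the exception oop and prepares a zero to clear the thread's pending-exception field, per the comment above. A hedged C-level sketch of the overall dispatch (types and names are illustrative):

  // Illustrative sketch of the dispatch implemented above (not HotSpot code).
  struct Thread { void* pending_exception; };
  typedef void (*ExceptionHandler)(void* exception);

  void dispatch_pending_exception(Thread* thread, ExceptionHandler handler) {
    void* exception = thread->pending_exception;  // ld R3_ARG1, thread_(pending_exception)
    thread->pending_exception = nullptr;          // the zero prepared in R4_ARG2
    handler(exception);                           // mtlr(R3_RET); ...; blr
  }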




Old version (before this change), shown as excerpts. Apart from the two hunks below, the old code is identical to the new version above, except that it had no unlock_method() wrapper. At the normal native-method exit, under the "Handle exceptions" banner:

  if (synchronized) {
    // Don't check for exceptions since we're still in the i2n frame. Do that
    // manually afterwards.
    __ unlock_object(R26_monitor, false); // Can also unlock methods.
  }

The same direct call appears again on the exception path, after BIND(exception_return_sync_check):

  if (synchronized) {
    // Don't check for exceptions since we're still in the i2n frame. Do that
    // manually afterwards.
    __ unlock_object(R26_monitor, false); // Can also unlock methods.
  }