1518 __ stw(R0/*thread_state*/, thread_(thread_state));
1519 if (UseMembar) {
1520 __ fence();
1521 }
1522 // Write serialization page so that the VM thread can do a pseudo remote
1523 // membar. We use the current thread pointer to calculate a thread
1524 // specific offset to write to within the page. This minimizes bus
1525 // traffic due to cache line collision.
1526 else {
1527 __ serialize_memory(R16_thread, R11_scratch1, R12_scratch2);
1528 }
1529
1530 // Now before we return to java we must look for a current safepoint
1531 // (a new safepoint can not start since we entered native_trans).
1532 // We must check here because a current safepoint could be modifying
1533 // the callers registers right this moment.
1534
1535 // Acquire isn't strictly necessary here because of the fence, but
1536 // sync_state is declared to be volatile, so we do it anyway
1537 // (cmp-br-isync on one path, release (same as acquire on PPC64) on the other path).
1538 int sync_state_offs = __ load_const_optimized(sync_state_addr, SafepointSynchronize::address_of_state(), /*temp*/R0, true);
1539
1540 // TODO PPC port assert(4 == SafepointSynchronize::sz_state(), "unexpected field size");
1541 __ lwz(sync_state, sync_state_offs, sync_state_addr);
1542
1543 // TODO PPC port assert(4 == Thread::sz_suspend_flags(), "unexpected field size");
1544 __ lwz(suspend_flags, thread_(suspend_flags));
1545
1546 Label sync_check_done;
1547 Label do_safepoint;
1548 // No synchronization in progress nor yet synchronized.
1549 __ cmpwi(CCR0, sync_state, SafepointSynchronize::_not_synchronized);
1550 // Not suspended.
1551 __ cmpwi(CCR1, suspend_flags, 0);
1552
1553 __ bne(CCR0, do_safepoint);
1554 __ beq(CCR1, sync_check_done);
1555 __ bind(do_safepoint);
1556 __ isync();
1557 // Block. We do the call directly and leave the current
1558 // last_Java_frame setup undisturbed. We must save any possible
1559 // native result across the call. No oop is present.
1560
1561 __ mr(R3_ARG1, R16_thread);
1562 #if defined(ABI_ELFv2)
1563 __ call_c(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans),
1564 relocInfo::none);
1565 #else
1566 __ call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, JavaThread::check_special_condition_for_native_trans),
1567 relocInfo::none);
1568 #endif
1569
1570 __ bind(sync_check_done);
1571
1572 //=============================================================================
1573 // <<<<<< Back in Interpreter Frame >>>>>
1574
1575 // We are in thread_in_native_trans here and back in the normal
1576 // interpreter frame. We don't have to do anything special about
1577 // safepoints and we can switch to Java mode anytime we are ready.
1578
1579 // Note: frame::interpreter_frame_result has a dependency on how the
1580 // method result is saved across the call to post_method_exit. For
1581 // native methods it assumes that the non-FPU/non-void result is
1582 // saved in _native_lresult and a FPU result in _native_fresult. If
1583 // this changes then the interpreter_frame_result implementation
1584 // will need to be updated too.
1585
1586 // On PPC64, we have stored the result directly after the native call.
1587
1588 //=============================================================================
1589 // Back in Java
1590
1591 // We use release_store_fence to update values like the thread state, where
1592 // we don't want the current thread to continue until all our prior memory
1593 // accesses (including the new thread state) are visible to other threads.
1594 __ li(R0/*thread_state*/, _thread_in_Java);
1595 __ release();
1596 __ stw(R0/*thread_state*/, thread_(thread_state));
1597
1598 if (CheckJNICalls) {
1599 // clear_pending_jni_exception_check
1600 __ load_const_optimized(R0, 0L);
1601 __ st_ptr(R0, JavaThread::pending_jni_exception_check_fn_offset(), R16_thread);
1602 }
1603
1604 __ reset_last_Java_frame();
1605
1606 // Jvmdi/jvmpi support. Whether we've got an exception pending or
1607 // not, and whether unlocking throws an exception or not, we notify
1608 // on native method exit. If we do have an exception, we'll end up
1609 // in the caller's context to handle it, so if we don't do the
1610 // notify here, we'll drop it on the floor.
1611 __ notify_method_exit(true/*native method*/,
1612 ilgl /*illegal state (not used for native methods)*/,
1613 InterpreterMacroAssembler::NotifyJVMTI,
1614 false /*check_exceptions*/);
1615
1841 //
1842 // R3_ARG1..R6_ARG4 are preset to hold the incoming java arguments.
1843 // Their contents are not constant but may change according to the requirements
1844 // of the emitted code.
1845 //
1846 // All other registers from the scratch/work register set are used "internally"
1847 // and contain garbage (i.e. unpredictable values) once blr() is reached.
1848 // Basically, only R3_RET contains a defined value which is the function result.
1849 //
1850 /**
1851 * Method entry for static native methods:
1852 * int java.util.zip.CRC32.update(int crc, int b)
1853 */
// Generates the interpreter entry for the CRC32.update intrinsic. Returns the
// stub start address, or NULL when the intrinsic is disabled (caller then uses
// the normal native entry). Fast path computes the CRC inline with no frame;
// slow path (safepoint pending) jumps to the regular native method entry.
1854 address TemplateInterpreterGenerator::generate_CRC32_update_entry() {
1855 if (UseCRC32Intrinsics) {
1856 address start = __ pc(); // Remember stub start address (is rtn value).
1857 Label slow_path;
1858
1859 // Safepoint check: take the slow path unless the VM is _not_synchronized.
1860 const Register sync_state = R11_scratch1;
1861 int sync_state_offs = __ load_const_optimized(sync_state, SafepointSynchronize::address_of_state(), /*temp*/R0, true);
1862 __ lwz(sync_state, sync_state_offs, sync_state);
1863 __ cmpwi(CCR0, sync_state, SafepointSynchronize::_not_synchronized);
1864 __ bne(CCR0, slow_path);
1865
1866 // We don't generate a local frame and don't align the stack, because
1867 // we don't even call stub code (we generate the code inline)
1868 // and there is no safepoint on this path.
1869
1870 // Load java parameters.
1871 // R15_esp is the caller's operand stack pointer, i.e. it points to the parameters.
1872 const Register argP = R15_esp;
1873 const Register crc = R3_ARG1; // crc value
1874 const Register data = R4_ARG2; // address of java byte value (kernel_crc32 needs address)
1875 const Register dataLen = R5_ARG3; // source data len (1 byte). Not used because calling the single-byte emitter.
1876 const Register table = R6_ARG4; // address of crc32 table
1877 const Register tmp = dataLen; // Reuse unused len register to show we don't actually need a separate tmp here.
1878
1879 BLOCK_COMMENT("CRC32_update {");
1880
1881 // Arguments are reversed on java expression stack
1882 #ifdef VM_LITTLE_ENDIAN
1883 __ addi(data, argP, 0+1*wordSize); // (stack) address of byte value. Emitter expects address, not value.
1884 // Being passed as an int, the single byte is at offset +0.
// NOTE(review): big-endian branch, CRC emission and the slow_path binding are
// elided from this excerpt; the jump below is presumably the slow_path tail —
// confirm against the full file.
1901 __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native), R11_scratch1);
1902 return start;
1903 }
1904
1905 return NULL;
1906 }
1907
1908
1909 /**
1910 * Method entry for static native methods:
1911 * int java.util.zip.CRC32.updateBytes( int crc, byte[] b, int off, int len)
1912 * int java.util.zip.CRC32.updateByteBuffer(int crc, long* buf, int off, int len)
1913 */
// Generates the interpreter entry for CRC32.updateBytes / updateByteBuffer
// (selected by 'kind'). Fast path runs inline with no frame; a pending
// safepoint branches to slow_path (bound outside this excerpt).
1914 address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
1915 if (UseCRC32Intrinsics) {
1916 address start = __ pc(); // Remember stub start address (is rtn value).
1917 Label slow_path;
1918
1919 // Safepoint check: take the slow path unless the VM is _not_synchronized.
1920 const Register sync_state = R11_scratch1;
1921 int sync_state_offs = __ load_const_optimized(sync_state, SafepointSynchronize::address_of_state(), /*temp*/R0, true);
1922 __ lwz(sync_state, sync_state_offs, sync_state);
1923 __ cmpwi(CCR0, sync_state, SafepointSynchronize::_not_synchronized);
1924 __ bne(CCR0, slow_path);
1925
1926 // We don't generate a local frame and don't align the stack, because
1927 // we don't even call stub code (we generate the code inline)
1928 // and there is no safepoint on this path.
1929
1930 // Load parameters.
1931 // R15_esp is the caller's operand stack pointer, i.e. it points to the parameters.
1932 const Register argP = R15_esp;
1933 const Register crc = R3_ARG1; // crc value
1934 const Register data = R4_ARG2; // address of java byte array
1935 const Register dataLen = R5_ARG3; // source data len
1936 const Register table = R6_ARG4; // address of crc32 table
1937
1938 const Register t0 = R9; // scratch registers for crc calculation
1939 const Register t1 = R10;
1940 const Register t2 = R11;
1941 const Register t3 = R12;
1942
1943 const Register tc0 = R2; // registers to hold pre-calculated column addresses
1944 const Register tc1 = R7;
|
// Native-method epilogue (emitted assembly), newer variant: same transition
// from _thread_in_native to Java as above, but the safepoint check uses the
// safepoint_poll macro and the final barrier is an explicit lwsync.
// NOTE(review): sync_state and suspend_flags are Register aliases declared
// earlier in this generator (outside this excerpt) — confirm.
1518 __ stw(R0/*thread_state*/, thread_(thread_state));
// Make the thread-state store visible to the VM thread: either a full fence,
// or a write to the safepoint serialization page (pseudo remote membar).
1519 if (UseMembar) {
1520 __ fence();
1521 }
1522 // Write serialization page so that the VM thread can do a pseudo remote
1523 // membar. We use the current thread pointer to calculate a thread
1524 // specific offset to write to within the page. This minimizes bus
1525 // traffic due to cache line collision.
1526 else {
1527 __ serialize_memory(R16_thread, R11_scratch1, R12_scratch2);
1528 }
1529
1530 // Now before we return to java we must look for a current safepoint
1531 // (a new safepoint cannot start since we entered native_trans).
1532 // We must check here because a current safepoint could be modifying
1533 // the caller's registers right this moment.
1534
1535 // Acquire isn't strictly necessary here because of the fence, but
1536 // sync_state is declared to be volatile, so we do it anyway
1537 // (cmp-br-isync on one path, release (same as acquire on PPC64) on the other path).
1538
1539 Label do_safepoint, sync_check_done;
1540 // No synchronization in progress nor yet synchronized.
// safepoint_poll branches to do_safepoint when a safepoint is pending.
1541 __ safepoint_poll(do_safepoint, sync_state);
1542
1543 // Not suspended.
1544 // TODO PPC port assert(4 == Thread::sz_suspend_flags(), "unexpected field size");
1545 __ lwz(suspend_flags, thread_(suspend_flags));
1546 __ cmpwi(CCR1, suspend_flags, 0);
1547 __ beq(CCR1, sync_check_done);
1548
1549 __ bind(do_safepoint);
1550 __ isync();
1551 // Block. We do the call directly and leave the current
1552 // last_Java_frame setup undisturbed. We must save any possible
1553 // native result across the call. No oop is present.
1554
1555 __ mr(R3_ARG1, R16_thread);
// ELFv2 calls a plain address; the older ABI calls through a function descriptor.
1556 #if defined(ABI_ELFv2)
1557 __ call_c(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans),
1558 relocInfo::none);
1559 #else
1560 __ call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, JavaThread::check_special_condition_for_native_trans),
1561 relocInfo::none);
1562 #endif
1563
1564 __ bind(sync_check_done);
1565
1566 //=============================================================================
1567 // <<<<<< Back in Interpreter Frame >>>>>>
1568
1569 // We are in thread_in_native_trans here and back in the normal
1570 // interpreter frame. We don't have to do anything special about
1571 // safepoints and we can switch to Java mode anytime we are ready.
1572
1573 // Note: frame::interpreter_frame_result has a dependency on how the
1574 // method result is saved across the call to post_method_exit. For
1575 // native methods it assumes that the non-FPU/non-void result is
1576 // saved in _native_lresult and a FPU result in _native_fresult. If
1577 // this changes then the interpreter_frame_result implementation
1578 // will need to be updated too.
1579
1580 // On PPC64, we have stored the result directly after the native call.
1581
1582 //=============================================================================
1583 // Back in Java
1584
1585 // We use release_store_fence to update values like the thread state, where
1586 // we don't want the current thread to continue until all our prior memory
1587 // accesses (including the new thread state) are visible to other threads.
1588 __ li(R0/*thread_state*/, _thread_in_Java);
1589 __ lwsync(); // Acquire safepoint and suspend state, release thread state.
1590 __ stw(R0/*thread_state*/, thread_(thread_state));
1591
1592 if (CheckJNICalls) {
1593 // clear_pending_jni_exception_check
1594 __ load_const_optimized(R0, 0L);
1595 __ st_ptr(R0, JavaThread::pending_jni_exception_check_fn_offset(), R16_thread);
1596 }
1597
1598 __ reset_last_Java_frame();
1599
1600 // Jvmdi/jvmpi support. Whether we've got an exception pending or
1601 // not, and whether unlocking throws an exception or not, we notify
1602 // on native method exit. If we do have an exception, we'll end up
1603 // in the caller's context to handle it, so if we don't do the
1604 // notify here, we'll drop it on the floor.
1605 __ notify_method_exit(true/*native method*/,
1606 ilgl /*illegal state (not used for native methods)*/,
1607 InterpreterMacroAssembler::NotifyJVMTI,
1608 false /*check_exceptions*/);
1609
1835 //
1836 // R3_ARG1..R6_ARG4 are preset to hold the incoming java arguments.
1837 // Their contents are not constant but may change according to the requirements
1838 // of the emitted code.
1839 //
1840 // All other registers from the scratch/work register set are used "internally"
1841 // and contain garbage (i.e. unpredictable values) once blr() is reached.
1842 // Basically, only R3_RET contains a defined value which is the function result.
1843 //
1844 /**
1845 * Method entry for static native methods:
1846 * int java.util.zip.CRC32.update(int crc, int b)
1847 */
// Generates the interpreter entry for the CRC32.update intrinsic (newer
// variant using safepoint_poll). Returns the stub start address, or NULL when
// the intrinsic is disabled. Fast path computes the CRC inline with no frame;
// slow path (safepoint pending) jumps to the regular native method entry.
1848 address TemplateInterpreterGenerator::generate_CRC32_update_entry() {
1849 if (UseCRC32Intrinsics) {
1850 address start = __ pc(); // Remember stub start address (is rtn value).
1851 Label slow_path;
1852
1853 // Safepoint check: branch to slow_path when a safepoint is pending.
1854 const Register sync_state = R11_scratch1;
1855 __ safepoint_poll(slow_path, sync_state);
1856
1857 // We don't generate a local frame and don't align the stack, because
1858 // we don't even call stub code (we generate the code inline)
1859 // and there is no safepoint on this path.
1860
1861 // Load java parameters.
1862 // R15_esp is the caller's operand stack pointer, i.e. it points to the parameters.
1863 const Register argP = R15_esp;
1864 const Register crc = R3_ARG1; // crc value
1865 const Register data = R4_ARG2; // address of java byte value (kernel_crc32 needs address)
1866 const Register dataLen = R5_ARG3; // source data len (1 byte). Not used because calling the single-byte emitter.
1867 const Register table = R6_ARG4; // address of crc32 table
1868 const Register tmp = dataLen; // Reuse unused len register to show we don't actually need a separate tmp here.
1869
1870 BLOCK_COMMENT("CRC32_update {");
1871
1872 // Arguments are reversed on java expression stack
1873 #ifdef VM_LITTLE_ENDIAN
1874 __ addi(data, argP, 0+1*wordSize); // (stack) address of byte value. Emitter expects address, not value.
1875 // Being passed as an int, the single byte is at offset +0.
// NOTE(review): big-endian branch, CRC emission and the slow_path binding are
// elided from this excerpt; the jump below is presumably the slow_path tail —
// confirm against the full file.
1892 __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native), R11_scratch1);
1893 return start;
1894 }
1895
1896 return NULL;
1897 }
1898
1899
1900 /**
1901 * Method entry for static native methods:
1902 * int java.util.zip.CRC32.updateBytes( int crc, byte[] b, int off, int len)
1903 * int java.util.zip.CRC32.updateByteBuffer(int crc, long* buf, int off, int len)
1904 */
// Generates the interpreter entry for CRC32.updateBytes / updateByteBuffer
// (selected by 'kind'; newer variant using safepoint_poll). Fast path runs
// inline with no frame; a pending safepoint branches to slow_path (bound
// outside this excerpt).
1905 address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
1906 if (UseCRC32Intrinsics) {
1907 address start = __ pc(); // Remember stub start address (is rtn value).
1908 Label slow_path;
1909
1910 // Safepoint check: branch to slow_path when a safepoint is pending.
1911 const Register sync_state = R11_scratch1;
1912 __ safepoint_poll(slow_path, sync_state);
1913
1914 // We don't generate a local frame and don't align the stack, because
1915 // we don't even call stub code (we generate the code inline)
1916 // and there is no safepoint on this path.
1917
1918 // Load parameters.
1919 // R15_esp is the caller's operand stack pointer, i.e. it points to the parameters.
1920 const Register argP = R15_esp;
1921 const Register crc = R3_ARG1; // crc value
1922 const Register data = R4_ARG2; // address of java byte array
1923 const Register dataLen = R5_ARG3; // source data len
1924 const Register table = R6_ARG4; // address of crc32 table
1925
1926 const Register t0 = R9; // scratch registers for crc calculation
1927 const Register t1 = R10;
1928 const Register t2 = R11;
1929 const Register t3 = R12;
1930
1931 const Register tc0 = R2; // registers to hold pre-calculated column addresses
1932 const Register tc1 = R7;
|