
src/hotspot/cpu/sparc/templateInterpreterGenerator_sparc.cpp

rev 47415 : Add Thread Local handshakes and thread local polling

Old file:
 895     return entry;
 896   }
 897 #endif // INCLUDE_ALL_GCS
 898 
 899   // If G1 is not enabled then attempt to go through the accessor entry point
 900   // Reference.get is an accessor
 901   return NULL;
 902 }
 903 
 904 /**
 905  * Method entry for static native methods:
 906  *   int java.util.zip.CRC32.update(int crc, int b)
 907  */
 908 address TemplateInterpreterGenerator::generate_CRC32_update_entry() {
 909 
 910   if (UseCRC32Intrinsics) {
 911     address entry = __ pc();
 912 
 913     Label L_slow_path;
 914     // If we need a safepoint check, generate full interpreter entry.
 915     ExternalAddress state(SafepointSynchronize::address_of_state());
 916     __ set(ExternalAddress(SafepointSynchronize::address_of_state()), O2);
 917     __ set(SafepointSynchronize::_not_synchronized, O3);
 918     __ cmp_and_br_short(O2, O3, Assembler::notEqual, Assembler::pt, L_slow_path);
 919 
 920     // Load parameters
 921     const Register crc   = O0; // initial crc
 922     const Register val   = O1; // byte to update with
 923     const Register table = O2; // address of 256-entry lookup table
 924 
 925     __ ldub(Gargs, 3, val);
 926     __ lduw(Gargs, 8, crc);
 927 
 928     __ set(ExternalAddress(StubRoutines::crc_table_addr()), table);
 929 
 930     __ not1(crc); // ~crc
 931     __ clruwu(crc);
 932     __ update_byte_crc32(crc, val, table);
 933     __ not1(crc); // ~crc
 934 
 935     // result in O0
 936     __ retl();
 937     __ delayed()->nop();
 938 
 939     // generate a vanilla native entry as the slow path
 940     __ bind(L_slow_path);
 941     __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native));
 942     return entry;
 943   }
 944   return NULL;
 945 }
 946 
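
For reference, the arithmetic this fast path emits (invert, one table-driven byte step, invert again) corresponds to the C++ sketch below. It is an illustration only, not HotSpot code: crc32_table is a locally built stand-in for the 256-entry table published at StubRoutines::crc_table_addr(), generated with the standard reflected CRC-32 polynomial.

#include <cstdint>

// Stand-in for the table at StubRoutines::crc_table_addr().
static uint32_t crc32_table[256];

static void init_crc32_table() {
  for (uint32_t i = 0; i < 256; i++) {
    uint32_t c = i;
    for (int k = 0; k < 8; k++) {
      c = (c & 1) ? (0xEDB88320u ^ (c >> 1)) : (c >> 1);  // reflected CRC-32 polynomial
    }
    crc32_table[i] = c;
  }
}

// Mirrors the emitted sequence: not1(crc); update_byte_crc32(crc, val, table); not1(crc).
static uint32_t crc32_update_byte(uint32_t crc, uint8_t val) {
  crc = ~crc;                                             // not1(crc)
  crc = crc32_table[(crc ^ val) & 0xFFu] ^ (crc >> 8);    // one table-driven byte step
  return ~crc;                                            // not1(crc)
}

After init_crc32_table(), crc32_update_byte(crc, b) should match java.util.zip.CRC32.update(crc, b) for a single byte.
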
 947 /**
 948  * Method entry for static native methods:
 949  *   int java.util.zip.CRC32.updateBytes(int crc, byte[] b, int off, int len)
 950  *   int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len)
 951  */
 952 address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
 953 
 954   if (UseCRC32Intrinsics) {
 955     address entry = __ pc();
 956 
 957     Label L_slow_path;
 958     // If we need a safepoint check, generate full interpreter entry.
 959     ExternalAddress state(SafepointSynchronize::address_of_state());
 960     __ set(ExternalAddress(SafepointSynchronize::address_of_state()), O2);
 961     __ set(SafepointSynchronize::_not_synchronized, O3);
 962     __ cmp_and_br_short(O2, O3, Assembler::notEqual, Assembler::pt, L_slow_path);
 963 
 964     // Load parameters from the stack
 965     const Register crc    = O0; // initial crc
 966     const Register buf    = O1; // source java byte array address
 967     const Register len    = O2; // len
 968     const Register offset = O3; // offset
 969 
 970     // Arguments are reversed on java expression stack
 971     // Calculate address of start element
 972     if (kind == Interpreter::java_util_zip_CRC32_updateByteBuffer) {
 973       __ lduw(Gargs, 0,  len);
 974       __ lduw(Gargs, 8,  offset);
 975       __ ldx( Gargs, 16, buf);
 976       __ lduw(Gargs, 32, crc);
 977       __ add(buf, offset, buf);
 978     } else {
 979       __ lduw(Gargs, 0,  len);
 980       __ lduw(Gargs, 8,  offset);
 981       __ ldx( Gargs, 16, buf);
 982       __ lduw(Gargs, 24, crc);
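
The Gargs offsets above follow from the interpreter's reversed argument order. A hedged illustration (not HotSpot code), assuming 8-byte expression-stack slots with offsets counted from the last argument and a Java long occupying two slots, reproduces the constants used by the two load sequences:

#include <cstdio>

int main() {
  const int slot = 8;
  // CRC32.updateByteBuffer(int crc, long buf, int off, int len):
  // len is the last argument, so it sits at offset 0; buf is a 2-slot long.
  printf("updateByteBuffer: len=%d off=%d buf=%d crc=%d\n",
         0 * slot, 1 * slot, 2 * slot, 4 * slot);   // 0 8 16 32
  // CRC32.updateBytes(int crc, byte[] b, int off, int len):
  // the array reference is a single slot, so crc sits one slot lower.
  printf("updateBytes:      len=%d off=%d buf=%d crc=%d\n",
         0 * slot, 1 * slot, 2 * slot, 3 * slot);   // 0 8 16 24
  return 0;
}
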


1380 #endif // ASSERT
1381   __ set(_thread_in_native, G3_scratch);
1382   __ st(G3_scratch, thread_state);
1383 
1384   // Call the jni method, using the delay slot to set the JNIEnv* argument.
1385   __ save_thread(L7_thread_cache); // save Gthread
1386   __ callr(O0, 0);
1387   __ delayed()->
1388      add(L7_thread_cache, in_bytes(JavaThread::jni_environment_offset()), O0);
1389 
1390   // Back from jni method. Lmethod in this frame is DEAD, DEAD, DEAD
1391 
1392   __ restore_thread(L7_thread_cache); // restore G2_thread
1393   __ reinit_heapbase();
1394 
1395   // must we block?
1396 
1397   // Block, if necessary, before resuming in _thread_in_Java state.
1398   // In order for GC to work, don't clear the last_Java_sp until after blocking.
1399   { Label no_block;
1400     AddressLiteral sync_state(SafepointSynchronize::address_of_state());
1401 
1402     // Switch thread to "native transition" state before reading the synchronization state.
1403     // This additional state is necessary because reading and testing the synchronization
1404     // state is not atomic w.r.t. GC, as this scenario demonstrates:
1405     //     Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
1406     //     VM thread changes sync state to synchronizing and suspends threads for GC.
1407     //     Thread A is resumed to finish this native method, but doesn't block here since it
1408     //     didn't see any synchronization in progress, and escapes.
1409     __ set(_thread_in_native_trans, G3_scratch);
1410     __ st(G3_scratch, thread_state);
1411     if (os::is_MP()) {
1412       if (UseMembar) {
1413         // Force this write out before the read below
1414         __ membar(Assembler::StoreLoad);
1415       } else {
1416         // Write serialization page so VM thread can do a pseudo remote membar.
1417         // We use the current thread pointer to calculate a thread specific
1418         // offset to write to within the page. This minimizes bus traffic
1419         // due to cache line collision.
1420         __ serialize_memory(G2_thread, G1_scratch, G3_scratch);
1421       }
1422     }
1423     __ load_contents(sync_state, G3_scratch);
1424     __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);
1425 
1426     Label L;
1427     __ br(Assembler::notEqual, false, Assembler::pn, L);
1428     __ delayed()->ld(G2_thread, JavaThread::suspend_flags_offset(), G3_scratch);
1429     __ cmp_and_br_short(G3_scratch, 0, Assembler::equal, Assembler::pt, no_block);
1430     __ bind(L);
1431 
1432     // Block.  Save any potential method result value before the operation and
1433     // use a leaf call to leave the last_Java_frame setup undisturbed.
1434     save_native_result();
1435     __ call_VM_leaf(L7_thread_cache,
1436                     CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans),
1437                     G2_thread);
1438 
1439     // Restore any method result value
1440     restore_native_result();
1441     __ bind(no_block);
1442   }
1443 
1444   // Clear the frame anchor now
1445 
1446   __ reset_last_Java_frame();
1447 
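
A simplified model of the transition emitted above, written with C++ atomics in place of the real thread-state machinery; safepoint_pending, suspend_flags_word and block_for_vm_operation are stand-in names, not HotSpot identifiers. The point is ordering: the _thread_in_native_trans store must be visible before the synchronization state is read, which is what the StoreLoad membar (or the serialization-page write on non-UseMembar systems) enforces; without it the thread could read a stale _not_synchronized and escape into Java while the VM thread still sees it in native.

#include <atomic>

enum State { in_native, in_native_trans, in_Java };

std::atomic<State> thread_state{in_native};
std::atomic<bool>  safepoint_pending{false};   // stand-in for the VM's sync state
std::atomic<int>   suspend_flags_word{0};      // stand-in for JavaThread::suspend_flags

// Stand-in for JavaThread::check_special_condition_for_native_trans.
void block_for_vm_operation() {}

void return_from_native_model() {
  thread_state.store(in_native_trans, std::memory_order_relaxed);
  std::atomic_thread_fence(std::memory_order_seq_cst);    // the StoreLoad barrier
  if (safepoint_pending.load() || suspend_flags_word.load() != 0) {
    block_for_vm_operation();                              // the "Block." path above
  }
  thread_state.store(in_Java, std::memory_order_relaxed);  // resume in _thread_in_Java
}
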


New file (rev 47415):

 895     return entry;
 896   }
 897 #endif // INCLUDE_ALL_GCS
 898 
 899   // If G1 is not enabled then attempt to go through the accessor entry point
 900   // Reference.get is an accessor
 901   return NULL;
 902 }
 903 
 904 /**
 905  * Method entry for static native methods:
 906  *   int java.util.zip.CRC32.update(int crc, int b)
 907  */
 908 address TemplateInterpreterGenerator::generate_CRC32_update_entry() {
 909 
 910   if (UseCRC32Intrinsics) {
 911     address entry = __ pc();
 912 
 913     Label L_slow_path;
 914     // If we need a safepoint check, generate full interpreter entry.
 915     __ safepoint_poll(L_slow_path, false, G2_thread, O2);
 916     __ delayed()->nop();


 917 
 918     // Load parameters
 919     const Register crc   = O0; // initial crc
 920     const Register val   = O1; // byte to update with
 921     const Register table = O2; // address of 256-entry lookup table
 922 
 923     __ ldub(Gargs, 3, val);
 924     __ lduw(Gargs, 8, crc);
 925 
 926     __ set(ExternalAddress(StubRoutines::crc_table_addr()), table);
 927 
 928     __ not1(crc); // ~crc
 929     __ clruwu(crc);
 930     __ update_byte_crc32(crc, val, table);
 931     __ not1(crc); // ~crc
 932 
 933     // result in O0
 934     __ retl();
 935     __ delayed()->nop();
 936 
 937     // generate a vanilla native entry as the slow path
 938     __ bind(L_slow_path);
 939     __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native));
 940     return entry;
 941   }
 942   return NULL;
 943 }
 944 
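
Where the original entry polls by comparing the global SafepointSynchronize state word against _not_synchronized, this patched entry calls safepoint_poll with the thread register, the thread-local poll the changeset introduces. The conceptual difference, sketched with hypothetical field names rather than the actual HotSpot implementation, is that the VM can now arm a single thread's poll (a handshake) as well as every thread's poll (a safepoint):

#include <atomic>

std::atomic<int> global_safepoint_state{0};    // old scheme: one word for all threads

struct JavaThreadSketch {
  std::atomic<bool> poll_armed{false};         // new scheme: hypothetical per-thread poll word
};

// Old: every thread takes the slow path only when the global state changes.
inline bool old_style_poll() {
  return global_safepoint_state.load() != 0;
}

// New: the VM can arm one thread's poll or all of them.
inline bool thread_local_poll(const JavaThreadSketch* self) {
  return self->poll_armed.load(std::memory_order_relaxed);
}
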
 945 /**
 946  * Method entry for static native methods:
 947  *   int java.util.zip.CRC32.updateBytes(int crc, byte[] b, int off, int len)
 948  *   int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len)
 949  */
 950 address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
 951 
 952   if (UseCRC32Intrinsics) {
 953     address entry = __ pc();
 954 
 955     Label L_slow_path;
 956     // If we need a safepoint check, generate full interpreter entry.
 957 
 958     __ safepoint_poll(L_slow_path, false, G2_thread, O2);
 959     __ delayed()->nop();

 960 
 961     // Load parameters from the stack
 962     const Register crc    = O0; // initial crc
 963     const Register buf    = O1; // source java byte array address
 964     const Register len    = O2; // len
 965     const Register offset = O3; // offset
 966 
 967     // Arguments are reversed on java expression stack
 968     // Calculate address of start element
 969     if (kind == Interpreter::java_util_zip_CRC32_updateByteBuffer) {
 970       __ lduw(Gargs, 0,  len);
 971       __ lduw(Gargs, 8,  offset);
 972       __ ldx( Gargs, 16, buf);
 973       __ lduw(Gargs, 32, crc);
 974       __ add(buf, offset, buf);
 975     } else {
 976       __ lduw(Gargs, 0,  len);
 977       __ lduw(Gargs, 8,  offset);
 978       __ ldx( Gargs, 16, buf);
 979       __ lduw(Gargs, 24, crc);


1377 #endif // ASSERT
1378   __ set(_thread_in_native, G3_scratch);
1379   __ st(G3_scratch, thread_state);
1380 
1381   // Call the jni method, using the delay slot to set the JNIEnv* argument.
1382   __ save_thread(L7_thread_cache); // save Gthread
1383   __ callr(O0, 0);
1384   __ delayed()->
1385      add(L7_thread_cache, in_bytes(JavaThread::jni_environment_offset()), O0);
1386 
1387   // Back from jni method. Lmethod in this frame is DEAD, DEAD, DEAD
1388 
1389   __ restore_thread(L7_thread_cache); // restore G2_thread
1390   __ reinit_heapbase();
1391 
1392   // must we block?
1393 
1394   // Block, if necessary, before resuming in _thread_in_Java state.
1395   // In order for GC to work, don't clear the last_Java_sp until after blocking.
1396   { Label no_block;

1397 
1398     // Switch thread to "native transition" state before reading the synchronization state.
1399     // This additional state is necessary because reading and testing the synchronization
1400     // state is not atomic w.r.t. GC, as this scenario demonstrates:
1401     //     Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
1402     //     VM thread changes sync state to synchronizing and suspends threads for GC.
1403     //     Thread A is resumed to finish this native method, but doesn't block here since it
1404     //     didn't see any synchronization in progress, and escapes.
1405     __ set(_thread_in_native_trans, G3_scratch);
1406     __ st(G3_scratch, thread_state);
1407     if (os::is_MP()) {
1408       if (UseMembar) {
1409         // Force this write out before the read below
1410         __ membar(Assembler::StoreLoad);
1411       } else {
1412         // Write serialization page so VM thread can do a pseudo remote membar.
1413         // We use the current thread pointer to calculate a thread specific
1414         // offset to write to within the page. This minimizes bus traffic
1415         // due to cache line collision.
1416         __ serialize_memory(G2_thread, G1_scratch, G3_scratch);
1417       }
1418     }


1419 
1420     Label L;
1421     __ safepoint_poll(L, false, G2_thread, G3_scratch);
1422     __ delayed()->ld(G2_thread, JavaThread::suspend_flags_offset(), G3_scratch);
1423     __ cmp_and_br_short(G3_scratch, 0, Assembler::equal, Assembler::pt, no_block);
1424     __ bind(L);
1425 
1426     // Block.  Save any potential method result value before the operation and
1427     // use a leaf call to leave the last_Java_frame setup undisturbed.
1428     save_native_result();
1429     __ call_VM_leaf(L7_thread_cache,
1430                     CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans),
1431                     G2_thread);
1432 
1433     // Restore any method result value
1434     restore_native_result();
1435     __ bind(no_block);
1436   }
1437 
1438   // Clear the frame anchor now
1439 
1440   __ reset_last_Java_frame();
1441 
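
In the patched block above the poll and the suspension check are fused: safepoint_poll branches to L when the poll fires, its delay slot loads the suspend flags either way, and the fall-through reaches no_block only when those flags are zero. The resulting condition, as a sketch with hypothetical names:

// Block (call check_special_condition_for_native_trans) unless the
// thread-local poll is clear AND no suspend flags are set.
bool must_block(bool poll_fired, int suspend_flags) {
  return poll_fired || suspend_flags != 0;
}
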

