#include "runtime/arguments.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"

#undef __
#define __ _masm->

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif

#define BIND(label) __ bind(label); BLOCK_COMMENT(#label ":")

//-----------------------------------------------------------------------------

// Actually we should never reach here since we do stack overflow checks before pushing any frame.
address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
  address entry = __ pc();
  __ unimplemented("generate_StackOverflowError_handler");
  return entry;
}

address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(const char* name) {
  address entry = __ pc();
  __ empty_expression_stack();
  __ load_const_optimized(R4_ARG2, (address) name);
  // Index is in R17_tos.
  __ mr(R5_ARG3, R17_tos);
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ArrayIndexOutOfBoundsException));
  return entry;
}

  //=============================================================================
  // Increment invocation counter. On overflow, entry to JNI method
  // will be compiled.
  Label invocation_counter_overflow, continue_after_compile;
  if (inc_counter) {
    if (synchronized) {
      // At this point in the method invocation the exception handler would
      // try to exit the monitor of a synchronized method which has not been
      // entered yet. To avoid this, we set the thread-local variable
      // _do_not_unlock_if_synchronized to true. If an exception is thrown by
      // the runtime, the exception handling code (i.e.
      // unlock_if_synchronized_method) checks this flag: it forces an unwind
      // in the topmost interpreter frame without performing an unlock.
      __ li(R0, 1);
      __ stb(R0, in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()), R16_thread);
    }
    generate_counter_incr(&invocation_counter_overflow, NULL, NULL);

    BIND(continue_after_compile);
    // Reset the _do_not_unlock_if_synchronized flag.
    if (synchronized) {
      __ li(R0, 0);
      __ stb(R0, in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()), R16_thread);
    }
  }
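
  // Rough pseudo code for the sequence generated above (illustrative sketch
  // only, using the JavaThread field name):
  //
  //   if (synchronized) thread->_do_not_unlock_if_synchronized = true;
  //   increment invocation counter;  // may branch to invocation_counter_overflow
  //   continue_after_compile:
  //   if (synchronized) thread->_do_not_unlock_if_synchronized = false;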

  // access_flags = method->access_flags();
  // Load access flags.
  assert(access_flags->is_nonvolatile(),
         "access_flags must be in a non-volatile register");
  // Type check.
  assert(4 == sizeof(AccessFlags), "unexpected field size");
  __ lwz(access_flags, method_(access_flags));

  // We don't want to reload R19_method and access_flags after calls
  // to some helper functions.
  assert(R19_method->is_nonvolatile(),
         "R19_method must be a non-volatile register");


  __ cmpdi(CCR0, signature_handler_fd, 0);
  __ bne(CCR0, call_signature_handler);

  // Method has never been called. Either generate a specialized
  // handler or point to the slow one.
  //
  // Pass parameter 'false' to avoid exception check in call_VM.
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), R19_method, false);

  // Check for an exception while looking up the target method. If we
  // incurred one, bail.
  __ ld(pending_exception, thread_(pending_exception));
  __ cmpdi(CCR0, pending_exception, 0);
  __ bne(CCR0, exception_return_sync_check); // Has pending exception.

  // Reload the signature handler; it may have been created/assigned in the meantime.
  __ ld(signature_handler_fd, method_(signature_handler));
  __ twi_0(signature_handler_fd); // Order wrt. load of klass mirror and entry point (isync is below).

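  // The lazy resolution above corresponds roughly to (illustrative sketch,
  // not the emitted instruction sequence):
  //
  //   if (method->signature_handler() == NULL) {
  //     InterpreterRuntime::prepare_native_call(thread, method);
  //     if (thread->has_pending_exception()) goto exception_return_sync_check;
  //     signature_handler_fd = method->signature_handler();  // reload
  //   }
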
  BIND(call_signature_handler);

  // Before we call the signature handler we push a new frame to
  // protect the interpreter frame volatile registers when we return
  // from JNI but before we can get back to Java.

  // First set the frame anchor while the SP/FP registers are
  // convenient and the slow signature handler can use this same frame
  // anchor.

  // We have a TOP_IJAVA_FRAME here, which belongs to us.
  __ set_top_ijava_frame_at_SP_as_last_Java_frame(R1_SP, R12_scratch2/*tmp*/);

  // Now the interpreter frame (and its call chain) have been
  // invalidated and flushed. We are now protected against eager
  // being enabled in native code. Even if it goes eager the
  // registers will be reloaded as clean and we will invalidate after
  // the call so no spurious flush should be possible.

  // Call signature handler and pass locals address.
  //
    __ testbitdi(CCR0, R0, access_flags, JVM_ACC_STATIC_BIT);
    __ bfalse(CCR0, method_is_not_static);

    // constants = method->constants();
    __ ld(R11_scratch1, in_bytes(Method::const_offset()), R19_method);
    __ ld(R11_scratch1, in_bytes(ConstMethod::constants_offset()), R11_scratch1);
    // pool_holder = method->constants()->pool_holder();
    __ ld(R11_scratch1/*pool_holder*/, ConstantPool::pool_holder_offset_in_bytes(),
          R11_scratch1/*constants*/);

    const int mirror_offset = in_bytes(Klass::java_mirror_offset());

    // mirror = pool_holder->klass_part()->java_mirror();
    __ ld(R0/*mirror*/, mirror_offset, R11_scratch1/*pool_holder*/);
    // state->_native_mirror = mirror;

    __ ld(R11_scratch1, 0, R1_SP);
    __ std(R0/*mirror*/, _ijava_state_neg(oop_tmp), R11_scratch1);
    // R4_ARG2 = &state->_oop_temp;
    __ addi(R4_ARG2, R11_scratch1, _ijava_state_neg(oop_tmp));
    BIND(method_is_not_static);
  }
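
  // For a static method, the code above computes roughly (sketch):
  //
  //   mirror = method->constants()->pool_holder()->java_mirror();
  //   state->_oop_tmp = mirror;      // keep the mirror alive across the call
  //   R4_ARG2 = &state->_oop_tmp;    // pass a handle, not the raw oop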

  // At this point, arguments have been copied off the stack into
  // their JNI positions. Oops are boxed in-place on the stack, with
  // handles copied to arguments. The result handler address is in a
  // register.

  // Pass JNIEnv address as first parameter.
  __ addir(R3_ARG1, thread_(jni_environment));

  // Load the native_method entry before we change the thread state.
  __ ld(native_method_fd, method_(native_function));

  //=============================================================================
  // Transition from _thread_in_Java to _thread_in_native. As soon as
  // we make this change the safepoint code needs to be certain that
  // the last Java frame we established is good. The pc in that frame
  // just needs to be near here, not an actual return address.

  // We use release_store_fence to update values like the thread state, where
  // Move native method result back into proper registers and return.
  // Invoke result handler (may unbox/promote).
  __ ld(R11_scratch1, 0, R1_SP);
  __ ld(R3_RET, _ijava_state_neg(lresult), R11_scratch1);
  __ lfd(F1_RET, _ijava_state_neg(fresult), R11_scratch1);
  __ call_stub(result_handler_addr);

  __ merge_frames(/*top_frame_sp*/ R21_sender_SP, /*return_pc*/ R0, R11_scratch1, R12_scratch2);

  // Must use the return pc which was loaded from the caller's frame
  // as the VM uses return-pc-patching for deoptimization.
  __ mtlr(R0);
  __ blr();
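
  // The return path above, roughly (illustrative sketch):
  //
  //   R3_RET = state->_lresult;    // saved integer/long result
  //   F1_RET = state->_fresult;    // saved float/double result
  //   result_handler(...);         // may unbox an oop handle or promote the type
  //   pop the frames and return via the (possibly patched) caller return pc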

  //-----------------------------------------------------------------------------
  // An exception is pending. We call into the runtime only if the
  // caller was not interpreted. If it was interpreted the
  // interpreter will do the right thing. If it was not interpreted
  // (call stub/compiled code) we change our return address and continue.

  BIND(exception_return_sync_check);

  if (synchronized) {
    // Don't check for exceptions since we're still in the i2n frame. Do that
    // manually afterwards.
    unlock_method(false);
  }
  BIND(exception_return_sync_check_already_unlocked);

  const Register return_pc = R31;

  __ ld(return_pc, 0, R1_SP);
  __ ld(return_pc, _abi(lr), return_pc);

  // Get the address of the exception handler.
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address),
                  R16_thread,
                  return_pc /* return pc */);
  __ merge_frames(/*top_frame_sp*/ R21_sender_SP, noreg, R11_scratch1, R12_scratch2);

  // Load the PC of the exception handler into LR.
  __ mtlr(R3_RET);

  // Load exception into R3_ARG1 and clear pending exception in thread.
  __ ld(R3_ARG1/*exception*/, thread_(pending_exception));
  __ li(R4_ARG2, 0);
  __ std(R4_ARG2, thread_(pending_exception));
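
  // In VM terms, the tail of this exception path does roughly (sketch):
  //
  //   handler_pc = SharedRuntime::exception_handler_for_return_address(thread, return_pc);
  //   exception  = thread->pending_exception();
  //   thread->clear_pending_exception();
  //   jump to handler_pc with the exception oop in R3_ARG1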

  // --------------------------------------------------------------------------
  // Start executing instructions.
  __ dispatch_next(vtos);

  // --------------------------------------------------------------------------
  // Out of line counter overflow and MDO creation code.
  if (ProfileInterpreter) {
    // We have decided to profile this method in the interpreter.
    __ bind(profile_method);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
    __ set_method_data_pointer_for_bcp();
    __ b(profile_method_continue);
  }

  if (inc_counter) {
    // Handle invocation counter overflow.
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(profile_method_continue);
  }
  return entry;
}

// CRC32 Intrinsics.
//
// Contract on scratch and work registers.
// =======================================
//
// On ppc, the register set {R2..R12} is available in the interpreter as scratch/work registers.
// You should, however, keep in mind that {R3_ARG1..R10_ARG8} is the C-ABI argument register set.
// You can't rely on these registers across calls.
//
// The generators for CRC32_update and for CRC32_updateBytes use the
// scratch/work register set internally, passing the work registers
// as arguments to the MacroAssembler emitters as required.
//
// R3_ARG1..R6_ARG4 are preset to hold the incoming Java arguments.
// Their contents are not constant but may change according to the requirements
// of the emitted code.
//
// All other registers from the scratch/work register set are used "internally"
// and contain garbage (i.e. unpredictable values) once blr() is reached.
// Basically, only R3_RET contains a defined value, which is the function result.
//
/**
 * Method entry for static native methods:
 *   int java.util.zip.CRC32.update(int crc, int b)
 */
address InterpreterGenerator::generate_CRC32_update_entry() {
  address start = __ function_entry(); // Remember stub start address (is the return value).

  if (UseCRC32Intrinsics) {
    Label slow_path;

    // Safepoint check.
    const Register sync_state = R11_scratch1;
    int sync_state_offs = __ load_const_optimized(sync_state, SafepointSynchronize::address_of_state(), /*temp*/R0, true);
    __ lwz(sync_state, sync_state_offs, sync_state);
    __ cmpwi(CCR0, sync_state, SafepointSynchronize::_not_synchronized);
    __ bne(CCR0, slow_path);
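
    // In effect (sketch): if (SafepointSynchronize::_state != _not_synchronized)
    //                       goto slow_path;  // take the vanilla native entry instead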

    // We don't generate a local frame and don't align the stack, because
    // we don't even call stub code (we generate the code inline)
    // and there is no safepoint on this path.

    // Load java parameters.
    // R15_esp is the caller's operand stack pointer, i.e. it points to the parameters.
    const Register argP    = R15_esp;
    const Register crc     = R3_ARG1;  // crc value
    const Register data    = R4_ARG2;  // address of java byte value (kernel_crc32 needs address)
    const Register dataLen = R5_ARG3;  // source data len (1 byte). Not used because calling the single-byte emitter.
    const Register table   = R6_ARG4;  // address of crc32 table
    const Register tmp     = dataLen;  // Reuse unused len register to show we don't actually need a separate tmp here.

    BLOCK_COMMENT("CRC32_update {");

    // Arguments are reversed on the Java expression stack.
#ifdef VM_LITTLE_ENDIAN
    __ addi(data, argP, 0+1*wordSize); // (stack) address of byte value. Emitter expects address, not value.
                                       // Being passed as an int, the single byte is at offset +0.
#else
    __ addi(data, argP, 3+1*wordSize); // (stack) address of byte value. Emitter expects address, not value.
                                       // Being passed from java as an int, the single byte is at offset +3.
#endif
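
    // Worked example (sketch): the int argument 'b' occupies one stack slot,
    // with its 32-bit value starting at argP + 1*wordSize. The byte we need
    // is the int's least significant byte: offset +0 within the value on
    // little-endian, offset +3 on big-endian, which explains the two address
    // computations above.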
    __ lwz(crc, 2*wordSize, argP); // Current crc state, zero extend to 64 bit to have a clean register.

    StubRoutines::ppc64::generate_load_crc_table_addr(_masm, table);
    __ kernel_crc32_singleByte(crc, data, dataLen, table, tmp);

    // Restore caller sp for c2i case and return.
    __ mr(R1_SP, R21_sender_SP); // Cut the stack back to where the caller started.
    __ blr();

    // Generate a vanilla native entry as the slow path.
    BLOCK_COMMENT("} CRC32_update");
    BIND(slow_path);
  }

  (void) generate_native_entry(false);

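  // Note (assumption about the ABI convention, for illustration): on ELFv1,
  // 'start' points to the function descriptor emitted by function_entry(),
  // whose first slot holds the actual code entry point, hence the dereference
  // below; on ELFv2 the label itself is the entry point.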
#if defined(ABI_ELFv2)
  return start;
#else
  return *(address*)start;
#endif
}

// CRC32 Intrinsics.
/**
 * Method entry for static native methods:
 *   int java.util.zip.CRC32.updateBytes(     int crc, byte[] b,  int off, int len)
 *   int java.util.zip.CRC32.updateByteBuffer(int crc, long* buf, int off, int len)
 */
address InterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
  address start = __ function_entry(); // Remember stub start address (is the return value).

  if (UseCRC32Intrinsics) {
    Label slow_path;

    // Safepoint check.
    const Register sync_state = R11_scratch1;
    int sync_state_offs = __ load_const_optimized(sync_state, SafepointSynchronize::address_of_state(), /*temp*/R0, true);
    __ lwz(sync_state, sync_state_offs, sync_state);
    __ cmpwi(CCR0, sync_state, SafepointSynchronize::_not_synchronized);
    __ bne(CCR0, slow_path);

    // We don't generate a local frame and don't align the stack, because
    // we don't even call stub code (we generate the code inline)
    // and there is no safepoint on this path.

    // Load parameters.
    // R15_esp is the caller's operand stack pointer, i.e. it points to the parameters.
    const Register argP    = R15_esp;
    const Register crc     = R3_ARG1;  // crc value
    const Register data    = R4_ARG2;  // address of java byte array
    const Register dataLen = R5_ARG3;  // source data len
    const Register table   = R6_ARG4;  // address of crc32 table

    const Register t0 = R9;  // scratch registers for crc calculation
    const Register t1 = R10;
    const Register t2 = R11;
    const Register t3 = R12;

    const Register tc0 = R2; // registers to hold pre-calculated column addresses
    const Register tc1 = R7;
    const Register tc2 = R8;
    const Register tc3 = table; // table address is reconstructed at the end of the kernel_crc32_* emitters

    const Register tmp = t0; // Only used very locally to calculate the byte buffer address.

    // Arguments are reversed on the Java expression stack.
    // Calculate the address of the start element.
    if (kind == Interpreter::java_util_zip_CRC32_updateByteBuffer) { // Used for "updateByteBuffer direct".
      BLOCK_COMMENT("CRC32_updateByteBuffer {");
      // crc     @ (SP + 5W) (32bit)
      // buf     @ (SP + 3W) (64bit ptr to long array)
      // off     @ (SP + 2W) (32bit)
      // dataLen @ (SP + 1W) (32bit)
      // data = buf + off
      __ ld(  data,    3*wordSize, argP);  // start of byte buffer
      __ lwa( tmp,     2*wordSize, argP);  // byte buffer offset
      __ lwa( dataLen, 1*wordSize, argP);  // #bytes to process
      __ lwz( crc,     5*wordSize, argP);  // current crc state
      __ add( data, data, tmp);            // Add byte buffer offset.
    } else { // Used for "updateBytes update".
      BLOCK_COMMENT("CRC32_updateBytes {");
      // crc     @ (SP + 4W) (32bit)
      // buf     @ (SP + 3W) (64bit ptr to byte array)
      // off     @ (SP + 2W) (32bit)
      // dataLen @ (SP + 1W) (32bit)
      // data = buf + off + base_offset
      __ ld(  data,    3*wordSize, argP);  // start of byte buffer
      __ lwa( tmp,     2*wordSize, argP);  // byte buffer offset
      __ lwa( dataLen, 1*wordSize, argP);  // #bytes to process
      __ add( data, data, tmp);            // add byte buffer offset
      __ lwz( crc,     4*wordSize, argP);  // current crc state
      __ addi(data, data, arrayOopDesc::base_offset_in_bytes(T_BYTE));
    }
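
    // The address computation above is roughly (illustrative sketch):
    //
    //   updateByteBuffer: data = (address)buf + off;
    //   updateBytes:      data = (address)b + off + arrayOopDesc::base_offset_in_bytes(T_BYTE);
    //
    // i.e. the byte-array variant additionally skips the array object header.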

    StubRoutines::ppc64::generate_load_crc_table_addr(_masm, table);

    // Performance measurements show the 1word and 2word variants to be almost equivalent,
    // with a slight advantage for the 1word variant. We chose the 1word variant for
    // code compactness.
    __ kernel_crc32_1word(crc, data, dataLen, table, t0, t1, t2, t3, tc0, tc1, tc2, tc3);

    // Restore caller sp for c2i case and return.
    __ mr(R1_SP, R21_sender_SP); // Cut the stack back to where the caller started.
    __ blr();

    // Generate a vanilla native entry as the slow path.
    BLOCK_COMMENT("} CRC32_updateBytes(Buffer)");
    BIND(slow_path);
  }

  (void) generate_native_entry(false);

#if defined(ABI_ELFv2)
  return start;
#else
  return *(address*)start;
#endif
}

// These should never be compiled since the interpreter will prefer
// the compiled version to the intrinsic version.
bool AbstractInterpreter::can_be_compiled(methodHandle m) {
  return !math_entry_available(method_kind(m));
}

// How much stack a method activation needs in stack slots.
// We must calculate this exactly as in generate_fixed_frame.
// Note: This returns the conservative size assuming maximum alignment.
int AbstractInterpreter::size_top_interpreter_activation(Method* method) {
  const int max_alignment_size = 2;
  const int abi_scratch = frame::abi_reg_args_size;
  return method->max_locals() + method->max_stack() +
         frame::interpreter_frame_monitor_size() + max_alignment_size + abi_scratch;
}
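
// For example (sketch with made-up numbers): a method with max_locals() == 3
// and max_stack() == 4 reserves 3 + 4 + interpreter_frame_monitor_size() +
// 2 (max alignment) + abi_reg_args_size slots, i.e. the worst-case frame size.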

// Returns number of stackElementWords needed for the interpreter frame with the
// given sections.