30 #include "oops/arrayOop.hpp"
31 #include "oops/markOop.hpp"
32 #include "oops/methodData.hpp"
33 #include "oops/method.hpp"
34 #include "prims/jvmtiExport.hpp"
35 #include "prims/jvmtiRedefineClassesTrace.hpp"
36 #include "prims/jvmtiThreadState.hpp"
37 #include "runtime/basicLock.hpp"
38 #include "runtime/biasedLocking.hpp"
39 #include "runtime/sharedRuntime.hpp"
40 #include "runtime/thread.inline.hpp"
41
42
43 // Implementation of InterpreterMacroAssembler
44
// Emit an unconditional branch to a pre-generated interpreter entry point.
// 'entry' must already have been materialized by the interpreter generator;
// a NULL entry here indicates a generation-ordering bug, hence the assert.
45 void InterpreterMacroAssembler::jump_to_entry(address entry) {
46 assert(entry, "Entry must have been generated by now");
47 b(entry);
48 }
49
50 #ifndef CC_INTERP
51
52 void InterpreterMacroAssembler::check_and_handle_popframe(Register java_thread) {
53 if (JvmtiExport::can_pop_frame()) {
54 Label L;
55 // Initiate popframe handling only if it is not already being
56 // processed. If the flag has the popframe_processing bit set, it
57 // means that this code is called *during* popframe handling - we
58 // don't want to reenter.
59 // This method is only called just after the call into the vm in
60 // call_VM_base, so the arg registers are available.
61 ldrw(rscratch1, Address(rthread, JavaThread::popframe_condition_offset()));
62 tstw(rscratch1, JavaThread::popframe_pending_bit);
63 br(Assembler::EQ, L);
64 tstw(rscratch1, JavaThread::popframe_processing_bit);
65 br(Assembler::NE, L);
66 // Call Interpreter::remove_activation_preserving_args_entry() to get the
67 // address of the same-named entrypoint in the generated interpreter code.
68 call_VM_leaf(CAST_FROM_FN_PTR(address, Interpreter::remove_activation_preserving_args_entry));
69 br(r0);
70 bind(L);
71 }
578 // jvmti support
579 if (notify_jvmdi) {
580 notify_method_exit(state, NotifyJVMTI); // preserve TOSCA
581 } else {
582 notify_method_exit(state, SkipNotifyJVMTI); // preserve TOSCA
583 }
584
585 // remove activation
586 // get sender esp
587 ldr(esp,
588 Address(rfp, frame::interpreter_frame_sender_sp_offset * wordSize));
589 // remove frame anchor
590 leave();
591 // If we're returning to interpreted code we will shortly be
592 // adjusting SP to allow some space for ESP. If we're returning to
593 // compiled code the saved sender SP was saved in sender_sp, so this
594 // restores it.
595 andr(sp, esp, -16);
596 }
597
598 #endif // CC_INTERP
599
600 // Lock object
601 //
602 // Args:
603 // c_rarg1: BasicObjectLock to be used for locking
604 //
605 // Kills:
606 // r0
607 // c_rarg0, c_rarg1, c_rarg2, c_rarg3, .. (param regs)
608 // rscratch1, rscratch2 (scratch regs)
609 void InterpreterMacroAssembler::lock_object(Register lock_reg)
610 {
611 assert(lock_reg == c_rarg1, "The argument is only for looks. It must be c_rarg1");
612 if (UseHeavyMonitors) {
613 call_VM(noreg,
614 CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
615 lock_reg);
616 } else {
617 Label done;
618
619 const Register swap_reg = r0;
741 BasicLock::displaced_header_offset_in_bytes()));
742
743 // Test for recursion
744 cbz(header_reg, done);
745
746 // Atomic swap back the old header
747 cmpxchgptr(swap_reg, header_reg, obj_reg, rscratch1, done, /*fallthrough*/NULL);
748
749 // Call the runtime routine for slow case.
750 str(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset_in_bytes())); // restore obj
751 call_VM(noreg,
752 CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit),
753 lock_reg);
754
755 bind(done);
756
757 restore_bcp();
758 }
759 }
760
761 #ifndef CC_INTERP
762
// Load the current frame's saved MDP (method-data pointer) slot into 'mdp'
// and branch to 'zero_continue' if it is NULL, i.e. no profiling data has
// been set up for this frame. Only valid when ProfileInterpreter is on.
763 void InterpreterMacroAssembler::test_method_data_pointer(Register mdp,
764 Label& zero_continue) {
765 assert(ProfileInterpreter, "must be profiling interpreter");
766 ldr(mdp, Address(rfp, frame::interpreter_frame_mdp_offset * wordSize));
767 cbz(mdp, zero_continue);
768 }
769
770 // Set the method data pointer for the current bcp.
771 void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() {
772 assert(ProfileInterpreter, "must be profiling interpreter");
773 Label set_mdp;
774 stp(r0, r1, Address(pre(sp, -2 * wordSize)));
775
776 // Test MDO to avoid the call if it is NULL.
777 ldr(r0, Address(rmethod, in_bytes(Method::method_data_offset())));
778 cbz(r0, set_mdp);
779 call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::bcp_to_di), rmethod, rbcp);
780 // r0: mdi
781 // mdo is guaranteed to be non-zero here, we checked for it before the call.
782 ldr(r1, Address(rmethod, in_bytes(Method::method_data_offset())));
1328 index,
1329 in_bytes(MultiBranchData::relative_count_offset()));
1330
1331 // The method data pointer needs to be updated.
1332 update_mdp_by_offset(mdp,
1333 index,
1334 in_bytes(MultiBranchData::
1335 relative_displacement_offset()));
1336
1337 bind(profile_continue);
1338 }
1339 }
1340
// Verify the value in 'reg' as an oop, but only when the TOS state says the
// top-of-stack actually holds a reference (atos); other TOS states carry
// primitive values and there is nothing to check.
1341 void InterpreterMacroAssembler::verify_oop(Register reg, TosState state) {
1342 if (state == atos) {
1343 MacroAssembler::verify_oop(reg);
1344 }
1345 }
1346
// No interpreter FPU stack state to verify on this port; intentionally a no-op.
1347 void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) { ; }
1348 #endif // !CC_INTERP
1349
1350
1351 void InterpreterMacroAssembler::notify_method_entry() {
1352 // Whenever JVMTI is interp_only_mode, method entry/exit events are sent to
1353 // track stack depth. If it is possible to enter interp_only_mode we add
1354 // the code to check if the event should be sent.
1355 if (JvmtiExport::can_post_interpreter_events()) {
1356 Label L;
1357 ldr(r3, Address(rthread, JavaThread::interp_only_mode_offset()));
1358 tst(r3, ~0);
1359 br(Assembler::EQ, L);
1360 call_VM(noreg, CAST_FROM_FN_PTR(address,
1361 InterpreterRuntime::post_method_entry));
1362 bind(L);
1363 }
1364
1365 {
1366 SkipIfEqual skip(this, &DTraceMethodProbes, false);
1367 get_method(c_rarg1);
1368 call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
1375 call_VM_leaf(
1376 CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
1377 rthread, c_rarg1);
1378 }
1379
1380 }
1381
1382
// Emit the method-exit notification sequence: post a JVMTI method-exit event
// when the thread is in interp_only_mode (and 'mode' requests notification),
// then fire the DTrace method-exit probe. The TOS value is preserved across
// each runtime call via push(state)/pop(state) (template interpreter only).
1383 void InterpreterMacroAssembler::notify_method_exit(
1384 TosState state, NotifyMethodExitMode mode) {
1385 // Whenever JVMTI is interp_only_mode, method entry/exit events are sent to
1386 // track stack depth. If it is possible to enter interp_only_mode we add
1387 // the code to check if the event should be sent.
1388 if (mode == NotifyJVMTI && JvmtiExport::can_post_interpreter_events()) {
1389 Label L;
1390 // Note: frame::interpreter_frame_result has a dependency on how the
1391 // method result is saved across the call to post_method_exit. If this
1392 // is changed then the interpreter_frame_result implementation will
1393 // need to be updated too.
1394
1395 // For c++ interpreter the result is always stored at a known location in the frame
1396 // template interpreter will leave it on the top of the stack.
1397 NOT_CC_INTERP(push(state);)
// 32-bit load of the interp_only_mode flag; skip the runtime call when
// the thread is not running in interpreter-only mode.
1398 ldrw(r3, Address(rthread, JavaThread::interp_only_mode_offset()));
1399 cbz(r3, L);
1400 call_VM(noreg,
1401 CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_exit));
1402 bind(L);
1403 NOT_CC_INTERP(pop(state));
1404 }
1405
1406 {
// DTrace probe: SkipIfEqual jumps over this whole scope at runtime when
// DTraceMethodProbes is false.
1407 SkipIfEqual skip(this, &DTraceMethodProbes, false);
1408 NOT_CC_INTERP(push(state));
1409 get_method(c_rarg1);
1410 call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
1411 rthread, c_rarg1);
1412 NOT_CC_INTERP(pop(state));
1413 }
1414 }
1415
1416
1417 // Jump if ((*counter_addr += increment) & mask) satisfies the condition.
// Add 'increment' to the 32-bit counter at 'counter_addr' (unless the caller
// already preloaded it into 'scratch'), store it back, AND the result with
// the 32-bit value at 'mask', and branch to '*where' when 'cond' holds on
// that result. Clobbers scratch, scratch2 and the condition flags.
1418 void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr,
1419 int increment, Address mask,
1420 Register scratch, Register scratch2,
1421 bool preloaded, Condition cond,
1422 Label* where) {
1423 if (!preloaded) {
1424 ldrw(scratch, counter_addr);
1425 }
1426 add(scratch, scratch, increment);
1427 strw(scratch, counter_addr);
1428 ldrw(scratch2, mask);
// ands sets the condition flags consumed by the conditional branch below.
1429 ands(scratch, scratch, scratch2);
1430 br(cond, *where);
1431 }
1432
|
30 #include "oops/arrayOop.hpp"
31 #include "oops/markOop.hpp"
32 #include "oops/methodData.hpp"
33 #include "oops/method.hpp"
34 #include "prims/jvmtiExport.hpp"
35 #include "prims/jvmtiRedefineClassesTrace.hpp"
36 #include "prims/jvmtiThreadState.hpp"
37 #include "runtime/basicLock.hpp"
38 #include "runtime/biasedLocking.hpp"
39 #include "runtime/sharedRuntime.hpp"
40 #include "runtime/thread.inline.hpp"
41
42
43 // Implementation of InterpreterMacroAssembler
44
// Emit an unconditional branch to a pre-generated interpreter entry point.
// 'entry' must already have been materialized by the interpreter generator;
// a NULL entry here indicates a generation-ordering bug, hence the assert.
45 void InterpreterMacroAssembler::jump_to_entry(address entry) {
46 assert(entry, "Entry must have been generated by now");
47 b(entry);
48 }
49
50 void InterpreterMacroAssembler::check_and_handle_popframe(Register java_thread) {
51 if (JvmtiExport::can_pop_frame()) {
52 Label L;
53 // Initiate popframe handling only if it is not already being
54 // processed. If the flag has the popframe_processing bit set, it
55 // means that this code is called *during* popframe handling - we
56 // don't want to reenter.
57 // This method is only called just after the call into the vm in
58 // call_VM_base, so the arg registers are available.
59 ldrw(rscratch1, Address(rthread, JavaThread::popframe_condition_offset()));
60 tstw(rscratch1, JavaThread::popframe_pending_bit);
61 br(Assembler::EQ, L);
62 tstw(rscratch1, JavaThread::popframe_processing_bit);
63 br(Assembler::NE, L);
64 // Call Interpreter::remove_activation_preserving_args_entry() to get the
65 // address of the same-named entrypoint in the generated interpreter code.
66 call_VM_leaf(CAST_FROM_FN_PTR(address, Interpreter::remove_activation_preserving_args_entry));
67 br(r0);
68 bind(L);
69 }
576 // jvmti support
577 if (notify_jvmdi) {
578 notify_method_exit(state, NotifyJVMTI); // preserve TOSCA
579 } else {
580 notify_method_exit(state, SkipNotifyJVMTI); // preserve TOSCA
581 }
582
583 // remove activation
584 // get sender esp
585 ldr(esp,
586 Address(rfp, frame::interpreter_frame_sender_sp_offset * wordSize));
587 // remove frame anchor
588 leave();
589 // If we're returning to interpreted code we will shortly be
590 // adjusting SP to allow some space for ESP. If we're returning to
591 // compiled code the saved sender SP was saved in sender_sp, so this
592 // restores it.
593 andr(sp, esp, -16);
594 }
595
596 // Lock object
597 //
598 // Args:
599 // c_rarg1: BasicObjectLock to be used for locking
600 //
601 // Kills:
602 // r0
603 // c_rarg0, c_rarg1, c_rarg2, c_rarg3, .. (param regs)
604 // rscratch1, rscratch2 (scratch regs)
605 void InterpreterMacroAssembler::lock_object(Register lock_reg)
606 {
607 assert(lock_reg == c_rarg1, "The argument is only for looks. It must be c_rarg1");
608 if (UseHeavyMonitors) {
609 call_VM(noreg,
610 CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
611 lock_reg);
612 } else {
613 Label done;
614
615 const Register swap_reg = r0;
737 BasicLock::displaced_header_offset_in_bytes()));
738
739 // Test for recursion
740 cbz(header_reg, done);
741
742 // Atomic swap back the old header
743 cmpxchgptr(swap_reg, header_reg, obj_reg, rscratch1, done, /*fallthrough*/NULL);
744
745 // Call the runtime routine for slow case.
746 str(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset_in_bytes())); // restore obj
747 call_VM(noreg,
748 CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit),
749 lock_reg);
750
751 bind(done);
752
753 restore_bcp();
754 }
755 }
756
// Load the current frame's saved MDP (method-data pointer) slot into 'mdp'
// and branch to 'zero_continue' if it is NULL, i.e. no profiling data has
// been set up for this frame. Only valid when ProfileInterpreter is on.
757 void InterpreterMacroAssembler::test_method_data_pointer(Register mdp,
758 Label& zero_continue) {
759 assert(ProfileInterpreter, "must be profiling interpreter");
760 ldr(mdp, Address(rfp, frame::interpreter_frame_mdp_offset * wordSize));
761 cbz(mdp, zero_continue);
762 }
763
764 // Set the method data pointer for the current bcp.
765 void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() {
766 assert(ProfileInterpreter, "must be profiling interpreter");
767 Label set_mdp;
768 stp(r0, r1, Address(pre(sp, -2 * wordSize)));
769
770 // Test MDO to avoid the call if it is NULL.
771 ldr(r0, Address(rmethod, in_bytes(Method::method_data_offset())));
772 cbz(r0, set_mdp);
773 call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::bcp_to_di), rmethod, rbcp);
774 // r0: mdi
775 // mdo is guaranteed to be non-zero here, we checked for it before the call.
776 ldr(r1, Address(rmethod, in_bytes(Method::method_data_offset())));
1322 index,
1323 in_bytes(MultiBranchData::relative_count_offset()));
1324
1325 // The method data pointer needs to be updated.
1326 update_mdp_by_offset(mdp,
1327 index,
1328 in_bytes(MultiBranchData::
1329 relative_displacement_offset()));
1330
1331 bind(profile_continue);
1332 }
1333 }
1334
// Verify the value in 'reg' as an oop, but only when the TOS state says the
// top-of-stack actually holds a reference (atos); other TOS states carry
// primitive values and there is nothing to check.
1335 void InterpreterMacroAssembler::verify_oop(Register reg, TosState state) {
1336 if (state == atos) {
1337 MacroAssembler::verify_oop(reg);
1338 }
1339 }
1340
// No interpreter FPU stack state to verify on this port; intentionally a no-op.
1341 void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) { ; }
1342
1343
1344 void InterpreterMacroAssembler::notify_method_entry() {
1345 // Whenever JVMTI is interp_only_mode, method entry/exit events are sent to
1346 // track stack depth. If it is possible to enter interp_only_mode we add
1347 // the code to check if the event should be sent.
1348 if (JvmtiExport::can_post_interpreter_events()) {
1349 Label L;
1350 ldr(r3, Address(rthread, JavaThread::interp_only_mode_offset()));
1351 tst(r3, ~0);
1352 br(Assembler::EQ, L);
1353 call_VM(noreg, CAST_FROM_FN_PTR(address,
1354 InterpreterRuntime::post_method_entry));
1355 bind(L);
1356 }
1357
1358 {
1359 SkipIfEqual skip(this, &DTraceMethodProbes, false);
1360 get_method(c_rarg1);
1361 call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
1368 call_VM_leaf(
1369 CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
1370 rthread, c_rarg1);
1371 }
1372
1373 }
1374
1375
// Emit the method-exit notification sequence: post a JVMTI method-exit event
// when the thread is in interp_only_mode (and 'mode' requests notification),
// then fire the DTrace method-exit probe. The TOS value is preserved across
// each runtime call via push(state)/pop(state).
1376 void InterpreterMacroAssembler::notify_method_exit(
1377 TosState state, NotifyMethodExitMode mode) {
1378 // Whenever JVMTI is interp_only_mode, method entry/exit events are sent to
1379 // track stack depth. If it is possible to enter interp_only_mode we add
1380 // the code to check if the event should be sent.
1381 if (mode == NotifyJVMTI && JvmtiExport::can_post_interpreter_events()) {
1382 Label L;
1383 // Note: frame::interpreter_frame_result has a dependency on how the
1384 // method result is saved across the call to post_method_exit. If this
1385 // is changed then the interpreter_frame_result implementation will
1386 // need to be updated too.
1387
1388 // template interpreter will leave the result on the top of the stack.
1389 push(state);
// 32-bit load of the interp_only_mode flag; skip the runtime call when
// the thread is not running in interpreter-only mode.
1390 ldrw(r3, Address(rthread, JavaThread::interp_only_mode_offset()));
1391 cbz(r3, L);
1392 call_VM(noreg,
1393 CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_exit));
1394 bind(L);
1395 pop(state);
1396 }
1397
1398 {
// DTrace probe: SkipIfEqual jumps over this whole scope at runtime when
// DTraceMethodProbes is false.
1399 SkipIfEqual skip(this, &DTraceMethodProbes, false);
1400 push(state);
1401 get_method(c_rarg1);
1402 call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
1403 rthread, c_rarg1);
1404 pop(state);
1405 }
1406 }
1407
1408
1409 // Jump if ((*counter_addr += increment) & mask) satisfies the condition.
// Add 'increment' to the 32-bit counter at 'counter_addr' (unless the caller
// already preloaded it into 'scratch'), store it back, AND the result with
// the 32-bit value at 'mask', and branch to '*where' when 'cond' holds on
// that result. Clobbers scratch, scratch2 and the condition flags.
1410 void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr,
1411 int increment, Address mask,
1412 Register scratch, Register scratch2,
1413 bool preloaded, Condition cond,
1414 Label* where) {
1415 if (!preloaded) {
1416 ldrw(scratch, counter_addr);
1417 }
1418 add(scratch, scratch, increment);
1419 strw(scratch, counter_addr);
1420 ldrw(scratch2, mask);
// ands sets the condition flags consumed by the conditional branch below.
1421 ands(scratch, scratch, scratch2);
1422 br(cond, *where);
1423 }
1424
|