19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 // no precompiled headers
26 #include "classfile/vmSymbols.hpp"
27 #include "gc_interface/collectedHeap.hpp"
28 #include "interpreter/bytecodeHistogram.hpp"
29 #include "interpreter/bytecodeInterpreter.hpp"
30 #include "interpreter/bytecodeInterpreter.inline.hpp"
31 #include "interpreter/interpreter.hpp"
32 #include "interpreter/interpreterRuntime.hpp"
33 #include "memory/cardTableModRefBS.hpp"
34 #include "memory/resourceArea.hpp"
35 #include "oops/methodCounters.hpp"
36 #include "oops/objArrayKlass.hpp"
37 #include "oops/oop.inline.hpp"
38 #include "prims/jvmtiExport.hpp"
39 #include "runtime/frame.inline.hpp"
40 #include "runtime/handles.inline.hpp"
41 #include "runtime/interfaceSupport.hpp"
42 #include "runtime/sharedRuntime.hpp"
43 #include "runtime/threadCritical.hpp"
44 #include "utilities/exceptions.hpp"
45 #ifdef TARGET_OS_ARCH_linux_x86
46 # include "orderAccess_linux_x86.inline.hpp"
47 #endif
48 #ifdef TARGET_OS_ARCH_linux_sparc
49 # include "orderAccess_linux_sparc.inline.hpp"
50 #endif
51 #ifdef TARGET_OS_ARCH_linux_zero
52 # include "orderAccess_linux_zero.inline.hpp"
53 #endif
54 #ifdef TARGET_OS_ARCH_solaris_x86
55 # include "orderAccess_solaris_x86.inline.hpp"
56 #endif
57 #ifdef TARGET_OS_ARCH_solaris_sparc
58 # include "orderAccess_solaris_sparc.inline.hpp"
671 tty->print_cr("entering: depth %d bci: %d",
672 (istate->_stack_base - istate->_stack),
673 istate->_bcp - istate->_method->code_base());
674 interesting = true;
675 }
676 }
677 #endif // HACK
678
679
680 // lock method if synchronized
681 if (METHOD->is_synchronized()) {
682 // oop rcvr = locals[0].j.r;
683 oop rcvr;
684 if (METHOD->is_static()) {
685 rcvr = METHOD->constants()->pool_holder()->java_mirror();
686 } else {
687 rcvr = LOCALS_OBJECT(0);
688 VERIFY_OOP(rcvr);
689 }
690 // The initial monitor is ours for the taking
691 BasicObjectLock* mon = &istate->monitor_base()[-1];
692         assert(mon->obj() == rcvr, "method monitor mis-initialized");
694
695 bool success = UseBiasedLocking;
696 if (UseBiasedLocking) {
697 markOop mark = rcvr->mark();
698 if (mark->has_bias_pattern()) {
699 // The bias pattern is present in the object's header. Need to check
700 // whether the bias owner and the epoch are both still current.
701 intptr_t xx = ((intptr_t) THREAD) ^ (intptr_t) mark;
702 xx = (intptr_t) rcvr->klass()->prototype_header() ^ xx;
703           intptr_t yy = (xx & ~((intptr_t) markOopDesc::age_mask_in_place));
704 if (yy != 0 ) {
705 // At this point we know that the header has the bias pattern and
706 // that we are not the bias owner in the current epoch. We need to
707 // figure out more details about the state of the header in order to
708 // know what operations can be legally performed on the object's
709 // header.
710
711 // If the low three bits in the xor result aren't clear, that means
712 // the prototype header is no longer biased and we have to revoke
713 // the bias on this object.
714
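              // For reference, the mark word layout while biased is
              // (see markOop.hpp for the authoritative definition):
              //   [JavaThread* | epoch | age | 1 | 01]
              // so biased_lock_mask_in_place selects the low "1 | 01" bits
              // and epoch_mask_in_place selects the epoch field.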
715           if ((yy & markOopDesc::biased_lock_mask_in_place) == 0) {
716 // Biasing is still enabled for this data type. See whether the
717 // epoch of the current bias is still valid, meaning that the epoch
718 // bits of the mark word are equal to the epoch bits of the
719 // prototype header. (Note that the prototype header's epoch bits
720 // only change at a safepoint.) If not, attempt to rebias the object
721 // toward the current thread. Note that we must be absolutely sure
722 // that the current epoch is invalid in order to do this because
723 // otherwise the manipulations it performs on the mark word are
724 // illegal.
725             if ((yy & markOopDesc::epoch_mask_in_place) == 0) {
726 // The epoch of the current bias is still valid but we know nothing
727 // about the owner; it might be set or it might be clear. Try to
728 // acquire the bias of the object using an atomic operation. If this
729 // fails we will go in to the runtime to revoke the object's bias.
730 // Note that we first construct the presumed unbiased header so we
731 // don't accidentally blow away another thread's valid bias.
732 intptr_t unbiased = (intptr_t) mark & (markOopDesc::biased_lock_mask_in_place |
733 markOopDesc::age_mask_in_place |
734 markOopDesc::epoch_mask_in_place);
735 if (Atomic::cmpxchg_ptr((intptr_t)THREAD | unbiased, (intptr_t*) rcvr->mark_addr(), unbiased) != unbiased) {
736 CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception);
737 }
738 } else {
739               // Try to rebias toward the current thread.
740 // At this point we know the epoch has expired, meaning that the
741 // current "bias owner", if any, is actually invalid. Under these
742 // circumstances _only_, we are allowed to use the current header's
743 // value as the comparison value when doing the cas to acquire the
744 // bias in the current epoch. In other words, we allow transfer of
745 // the bias from one thread to another directly in this situation.
746               intptr_t new_bias = (intptr_t) rcvr->klass()->prototype_header() | (intptr_t) THREAD;
747               if (Atomic::cmpxchg_ptr(new_bias,
748                                       (intptr_t*) rcvr->mark_addr(),
749                                       (intptr_t) mark) != (intptr_t) mark) {
750 CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception);
751 }
752 }
753 } else {
754               // Try to revoke the bias.
755 // The prototype mark in the klass doesn't have the bias bit set any
756 // more, indicating that objects of this data type are not supposed
757 // to be biased any more. We are going to try to reset the mark of
758 // this object to the prototype value and fall through to the
759 // CAS-based locking scheme. Note that if our CAS fails, it means
760 // that another thread raced us for the privilege of revoking the
761 // bias of this particular object, so it's okay to continue in the
762 // normal locking code.
763 //
764               // Whether or not our CAS wins the race, the bias is now gone,
765               // so fall through to the CAS-based locking scheme below.
766               Atomic::cmpxchg_ptr(rcvr->klass()->prototype_header(),
767                                   (intptr_t*) rcvr->mark_addr(),
768                                   mark);
769               // (*counters->revoked_lock_entry_count_addr())++;
770               success = false;
771 }
772 }
773 } else {
774             // Not biased (or bias not ours): fall through to CAS locking.
775             success = false;
776 }
777 }
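          // Traditional lightweight locking: save the unlocked mark word in
          // the BasicLock (the "displaced header") and CAS the mark word to
          // point at the monitor; a NULL displaced header later identifies
          // the recursive case on exit.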
778 if (!success) {
779 markOop displaced = rcvr->mark()->set_unlocked();
780 mon->lock()->set_displaced_header(displaced);
781 if (Atomic::cmpxchg_ptr(mon, rcvr->mark_addr(), displaced) != displaced) {
782 // Is it simple recursive case?
783 if (THREAD->is_lock_owned((address) displaced->clear_lock_bits())) {
784 mon->lock()->set_displaced_header(NULL);
785 } else {
786 CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception);
787 }
788 }
789 }
790 }
791 THREAD->clr_do_not_unlock();
792
793 // Notify jvmti
794 #ifdef VM_JVMTI
795 if (_jvmti_interp_events) {
796 // Whenever JVMTI puts a thread in interp_only_mode, method
797 // entry/exit events are sent for that thread to track stack depth.
798 if (THREAD->is_interp_only_mode()) {
799 CALL_VM(InterpreterRuntime::post_method_entry(THREAD),
800 handle_exception);
801 }
802 }
803 #endif /* VM_JVMTI */
864 // get out of here
865 //
866 if ( Bytecodes::code_at(METHOD, pc) == Bytecodes::_return_register_finalizer) {
867 // this will do the right thing even if an exception is pending.
868 goto handle_return;
869 }
870 UPDATE_PC(Bytecodes::length_at(METHOD, pc));
871 if (THREAD->has_pending_exception()) goto handle_exception;
872 goto run;
873 }
874 case got_monitors: {
875 // continue locking now that we have a monitor to use
876       // we expect to find the newly allocated monitor at the "top" of the monitor stack.
877 oop lockee = STACK_OBJECT(-1);
878 VERIFY_OOP(lockee);
879       // dereferencing lockee ought to provoke an implicit null check
880 // find a free monitor
881 BasicObjectLock* entry = (BasicObjectLock*) istate->stack_base();
882 assert(entry->obj() == NULL, "Frame manager didn't allocate the monitor");
883 entry->set_obj(lockee);
884
885 markOop displaced = lockee->mark()->set_unlocked();
886 entry->lock()->set_displaced_header(displaced);
887 if (Atomic::cmpxchg_ptr(entry, lockee->mark_addr(), displaced) != displaced) {
888 // Is it simple recursive case?
889 if (THREAD->is_lock_owned((address) displaced->clear_lock_bits())) {
890 entry->lock()->set_displaced_header(NULL);
891 } else {
892 CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
893 }
894 }
895 UPDATE_PC_AND_TOS(1, -1);
896 goto run;
897 }
898 default: {
899 fatal("Unexpected message from frame manager");
900 }
901 }
902
903 run:
904
905 DO_UPDATE_INSTRUCTION_COUNT(*pc)
906 DEBUGGER_SINGLE_STEP_NOTIFY();
907 #ifdef PREFETCH_OPCCODE
908 opcode = *pc; /* prefetch first opcode */
909 #endif
910
911 #ifndef USELABELS
912 while (1)
913 #endif
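    /* With USELABELS each CASE ends by dispatching to the next bytecode via
       a computed goto through a table of label addresses, so no enclosing
       loop is needed; otherwise execution falls into this conventional
       while loop around the switch. */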
914 {
1683
1684 /* monitorenter and monitorexit for locking/unlocking an object */
1685
1686 CASE(_monitorenter): {
1687 oop lockee = STACK_OBJECT(-1);
1688       // dereferencing lockee ought to provoke an implicit null check
1689 CHECK_NULL(lockee);
1690       // Find a free monitor, or one already allocated for this object.
1691       // If we find the object already locked once, this is a recursive
1692       // enter and it still needs its own, new monitor.
1693 BasicObjectLock* limit = istate->monitor_base();
1694 BasicObjectLock* most_recent = (BasicObjectLock*) istate->stack_base();
1695 BasicObjectLock* entry = NULL;
1696 while (most_recent != limit ) {
1697 if (most_recent->obj() == NULL) entry = most_recent;
1698 else if (most_recent->obj() == lockee) break;
1699 most_recent++;
1700 }
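      // Example: with monitors [B][free][A] (most recent first), a recursive
      // enter on A breaks at the matching A and reuses the free slot seen on
      // the way; with no free slot, more_monitors asks the frame manager to
      // allocate one and the bytecode is re-executed.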
1701 if (entry != NULL) {
1702 entry->set_obj(lockee);
1703 markOop displaced = lockee->mark()->set_unlocked();
1704 entry->lock()->set_displaced_header(displaced);
1705 if (Atomic::cmpxchg_ptr(entry, lockee->mark_addr(), displaced) != displaced) {
1706 // Is it simple recursive case?
1707 if (THREAD->is_lock_owned((address) displaced->clear_lock_bits())) {
1708 entry->lock()->set_displaced_header(NULL);
1709 } else {
1710 CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
1711 }
1712 }
1713 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
1714 } else {
1715 istate->set_msg(more_monitors);
1716 UPDATE_PC_AND_RETURN(0); // Re-execute
1717 }
1718 }
1719
1720 CASE(_monitorexit): {
1721 oop lockee = STACK_OBJECT(-1);
1722 CHECK_NULL(lockee);
1723       // dereferencing lockee ought to provoke an implicit null check
1724 // find our monitor slot
1725 BasicObjectLock* limit = istate->monitor_base();
1726 BasicObjectLock* most_recent = (BasicObjectLock*) istate->stack_base();
1727 while (most_recent != limit ) {
1728 if ((most_recent)->obj() == lockee) {
1729 BasicLock* lock = most_recent->lock();
1730 markOop header = lock->displaced_header();
1731 most_recent->set_obj(NULL);
1732             // If it isn't recursive we must either swap the old header back or call the runtime
1733 if (header != NULL) {
1734 if (Atomic::cmpxchg_ptr(header, lockee->mark_addr(), lock) != lock) {
1735 // restore object for the slow case
1736 most_recent->set_obj(lockee);
1737 CALL_VM(InterpreterRuntime::monitorexit(THREAD, most_recent), handle_exception);
1738 }
1739 }
1740 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
1741 }
1742 most_recent++;
1743 }
1744 // Need to throw illegal monitor state exception
1745 CALL_VM(InterpreterRuntime::throw_illegal_monitor_state_exception(THREAD), handle_exception);
1746 ShouldNotReachHere();
1747 }
1748
1749 /* All of the non-quick opcodes. */
1750
1751 /* -Set clobbersCpIndex true if the quickened opcode clobbers the
1752 * constant pool index in the instruction.
1753 */
1754 CASE(_getfield):
1755 CASE(_getstatic):
1756 {
1757 u2 index;
1758 ConstantPoolCacheEntry* cache;
1759 index = Bytes::get_native_u2(pc+1);
2661       // examine all the entries in reverse time (and stack) order and
2662       // unlock as we find them. If we find the method monitor before
2663       // we are at the initial entry then we should throw an exception.
2664       // It is not clear that the template-based interpreter does this
2665       // correctly.
2666
2667 BasicObjectLock* base = istate->monitor_base();
2668 BasicObjectLock* end = (BasicObjectLock*) istate->stack_base();
2669 bool method_unlock_needed = METHOD->is_synchronized();
2670       // We know the initial monitor was used for the method, so don't
2671       // check that slot in the loop.
2672 if (method_unlock_needed) base--;
2673
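      // The monitor area lies between stack_base() (low addresses, most
      // recently allocated) and monitor_base() (high addresses, the method
      // monitor for synchronized methods), so walking end upward from
      // stack_base() visits monitors in reverse acquisition order.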
2674       // Check all the monitors to see that they are unlocked. Install an exception if one is found to be locked.
2675 while (end < base) {
2676 oop lockee = end->obj();
2677 if (lockee != NULL) {
2678 BasicLock* lock = end->lock();
2679 markOop header = lock->displaced_header();
2680 end->set_obj(NULL);
2681           // If it isn't recursive we must either swap the old header back or call the runtime
2682 if (header != NULL) {
2683 if (Atomic::cmpxchg_ptr(header, lockee->mark_addr(), lock) != lock) {
2684 // restore object for the slow case
2685 end->set_obj(lockee);
2686 {
2687 // Prevent any HandleMarkCleaner from freeing our live handles
2688 HandleMark __hm(THREAD);
2689 CALL_VM_NOCHECK(InterpreterRuntime::monitorexit(THREAD, end));
2690 }
2691 }
2692 }
2693 // One error is plenty
2694 if (illegal_state_oop() == NULL && !suppress_error) {
2695 {
2696 // Prevent any HandleMarkCleaner from freeing our live handles
2697 HandleMark __hm(THREAD);
2698 CALL_VM_NOCHECK(InterpreterRuntime::throw_illegal_monitor_state_exception(THREAD));
2699 }
2700 assert(THREAD->has_pending_exception(), "Lost our exception!");
2701 illegal_state_oop = THREAD->pending_exception();
2702 THREAD->clear_pending_exception();
2703 }
2704 }
2705 end++;
2706 }
2707 // Unlock the method if needed
2708 if (method_unlock_needed) {
2709 if (base->obj() == NULL) {
2710           // The method is already unlocked; this is not good.
2711 if (illegal_state_oop() == NULL && !suppress_error) {
2712 {
2718 illegal_state_oop = THREAD->pending_exception();
2719 THREAD->clear_pending_exception();
2720 }
2721 } else {
2722 //
2723         // The initial monitor is always used for the method.
2724         // However, if that slot no longer holds the method's oop, it was
2725         // unlocked and reused by something that was never unlocked!
2726         //
2727         // Deopt can come in with rcvr dead because C2 knows its value is
2728         // preserved in the monitor, so we can't use locals[0] at all and
2729         // must use the first monitor slot.
2730 //
2731 oop rcvr = base->obj();
2732 if (rcvr == NULL) {
2733 if (!suppress_error) {
2734 VM_JAVA_ERROR_NO_JUMP(vmSymbols::java_lang_NullPointerException(), "");
2735 illegal_state_oop = THREAD->pending_exception();
2736 THREAD->clear_pending_exception();
2737 }
2738 } else {
2739 BasicLock* lock = base->lock();
2740 markOop header = lock->displaced_header();
2741 base->set_obj(NULL);
2742           // If it isn't recursive we must either swap the old header back or call the runtime
2743 if (header != NULL) {
2744 if (Atomic::cmpxchg_ptr(header, rcvr->mark_addr(), lock) != lock) {
2745 // restore object for the slow case
2746 base->set_obj(rcvr);
2747 {
2748 // Prevent any HandleMarkCleaner from freeing our live handles
2749 HandleMark __hm(THREAD);
2750 CALL_VM_NOCHECK(InterpreterRuntime::monitorexit(THREAD, base));
2751 }
2752 if (THREAD->has_pending_exception()) {
2753 if (!suppress_error) illegal_state_oop = THREAD->pending_exception();
2754 THREAD->clear_pending_exception();
2755 }
2756 }
2757 }
2758 }
2759 }
2760 }
2761 }
2762
2763 //
2764 // Notify jvmti/jvmdi
2765 //
2766 // NOTE: we do not notify a method_exit if we have a pending exception,
2767 // including an exception we generate for unlocking checks. In the former
2768 // case, JVMDI has already been notified by our call for the exception handler
2769 // and in both cases as far as JVMDI is concerned we have already returned.
2770 // If we notify it again JVMDI will be all confused about how many frames
2771 // are still on the stack (4340444).
2772 //
2773 // NOTE Further! It turns out that the JVMTI spec in fact expects to see
2774 // method_exit events whenever we leave an activation unless it was done
2775 // for popframe. This is nothing like jvmdi. However we are passing the
2776 // tests at the moment (apparently because they are jvmdi based) so rather
2777 // than change this code and possibly fail tests we will leave it alone
2778 // (with this note) in anticipation of changing the vm and the tests
2779 // simultaneously.
2780
2781
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 // no precompiled headers
26 #include "classfile/vmSymbols.hpp"
27 #include "gc_interface/collectedHeap.hpp"
28 #include "interpreter/bytecodeHistogram.hpp"
29 #include "interpreter/bytecodeInterpreter.hpp"
30 #include "interpreter/bytecodeInterpreter.inline.hpp"
31 #include "interpreter/interpreter.hpp"
32 #include "interpreter/interpreterRuntime.hpp"
33 #include "memory/cardTableModRefBS.hpp"
34 #include "memory/resourceArea.hpp"
35 #include "oops/methodCounters.hpp"
36 #include "oops/objArrayKlass.hpp"
37 #include "oops/oop.inline.hpp"
38 #include "prims/jvmtiExport.hpp"
39 #include "runtime/biasedLocking.hpp"
40 #include "runtime/frame.inline.hpp"
41 #include "runtime/handles.inline.hpp"
42 #include "runtime/interfaceSupport.hpp"
43 #include "runtime/sharedRuntime.hpp"
44 #include "runtime/threadCritical.hpp"
45 #include "utilities/exceptions.hpp"
46 #ifdef TARGET_OS_ARCH_linux_x86
47 # include "orderAccess_linux_x86.inline.hpp"
48 #endif
49 #ifdef TARGET_OS_ARCH_linux_sparc
50 # include "orderAccess_linux_sparc.inline.hpp"
51 #endif
52 #ifdef TARGET_OS_ARCH_linux_zero
53 # include "orderAccess_linux_zero.inline.hpp"
54 #endif
55 #ifdef TARGET_OS_ARCH_solaris_x86
56 # include "orderAccess_solaris_x86.inline.hpp"
57 #endif
58 #ifdef TARGET_OS_ARCH_solaris_sparc
59 # include "orderAccess_solaris_sparc.inline.hpp"
672 tty->print_cr("entering: depth %d bci: %d",
673 (istate->_stack_base - istate->_stack),
674 istate->_bcp - istate->_method->code_base());
675 interesting = true;
676 }
677 }
678 #endif // HACK
679
680
681 // lock method if synchronized
682 if (METHOD->is_synchronized()) {
683 // oop rcvr = locals[0].j.r;
684 oop rcvr;
685 if (METHOD->is_static()) {
686 rcvr = METHOD->constants()->pool_holder()->java_mirror();
687 } else {
688 rcvr = LOCALS_OBJECT(0);
689 VERIFY_OOP(rcvr);
690 }
691 // The initial monitor is ours for the taking
692       // The monitor is no longer filled in by the frame manager, as that caused a race condition with biased locking.
693 BasicObjectLock* mon = &istate->monitor_base()[-1];
694 mon->set_obj(rcvr);
695 bool success = false;
696 uintptr_t epoch_mask_in_place = (uintptr_t)markOopDesc::epoch_mask_in_place;
697 markOop mark = rcvr->mark();
698 intptr_t hash = (intptr_t) markOopDesc::no_hash;
699 // Implies UseBiasedLocking.
700 if (mark->has_bias_pattern()) {
701 uintptr_t thread_ident;
702 uintptr_t anticipated_bias_locking_value;
703 thread_ident = (uintptr_t)istate->thread();
704 anticipated_bias_locking_value =
705 (((uintptr_t)rcvr->klass()->prototype_header() | thread_ident) ^ (uintptr_t)mark) &
706 ~((uintptr_t) markOopDesc::age_mask_in_place);
707
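          // The xor above folds three questions into one word; the tests
          // below read it as:
          //   all bits clear                 -> already biased to this thread
          //   biased_lock bits set           -> klass no longer biasable: revoke
          //   epoch bits set                 -> bias expired: try to rebias
          //   otherwise (thread bits differ) -> anonymously biased: try to claim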
708 if (anticipated_bias_locking_value == 0) {
709 // Already biased towards this thread, nothing to do.
710 if (PrintBiasedLockingStatistics) {
711 (* BiasedLocking::biased_lock_entry_count_addr())++;
712 }
713 success = true;
714 } else if ((anticipated_bias_locking_value & markOopDesc::biased_lock_mask_in_place) != 0) {
715 // Try to revoke bias.
716 markOop header = rcvr->klass()->prototype_header();
717 if (hash != markOopDesc::no_hash) {
718 header = header->copy_set_hash(hash);
719 }
720 if (Atomic::cmpxchg_ptr(header, rcvr->mark_addr(), mark) == mark) {
721 if (PrintBiasedLockingStatistics)
722 (*BiasedLocking::revoked_lock_entry_count_addr())++;
723 }
724 } else if ((anticipated_bias_locking_value & epoch_mask_in_place) != 0) {
725 // Try to rebias.
726 markOop new_header = (markOop) ( (intptr_t) rcvr->klass()->prototype_header() | thread_ident);
727 if (hash != markOopDesc::no_hash) {
728 new_header = new_header->copy_set_hash(hash);
729 }
730 if (Atomic::cmpxchg_ptr((void*)new_header, rcvr->mark_addr(), mark) == mark) {
731 if (PrintBiasedLockingStatistics) {
732 (* BiasedLocking::rebiased_lock_entry_count_addr())++;
733 }
734 } else {
735 CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception);
736 }
737 success = true;
738 } else {
739 // Try to bias towards thread in case object is anonymously biased.
740 markOop header = (markOop) ((uintptr_t) mark &
741 ((uintptr_t)markOopDesc::biased_lock_mask_in_place |
742 (uintptr_t)markOopDesc::age_mask_in_place | epoch_mask_in_place));
743 if (hash != markOopDesc::no_hash) {
744 header = header->copy_set_hash(hash);
745 }
746 markOop new_header = (markOop) ((uintptr_t) header | thread_ident);
747 // Debugging hint.
748 DEBUG_ONLY(mon->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);)
749 if (Atomic::cmpxchg_ptr((void*)new_header, rcvr->mark_addr(), header) == header) {
750 if (PrintBiasedLockingStatistics) {
751 (* BiasedLocking::anonymously_biased_lock_entry_count_addr())++;
752 }
753 } else {
754 CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception);
755 }
756 success = true;
757 }
758 }
759
760 // Traditional lightweight locking.
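      // Under -XX:+UseHeavyMonitors the CAS fast path below is skipped and
      // every enter goes straight to InterpreterRuntime::monitorenter.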
761 if (!success) {
762 markOop displaced = rcvr->mark()->set_unlocked();
763 mon->lock()->set_displaced_header(displaced);
764 bool call_vm = UseHeavyMonitors;
765 if (call_vm || Atomic::cmpxchg_ptr(mon, rcvr->mark_addr(), displaced) != displaced) {
766 // Is it simple recursive case?
767 if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) {
768 mon->lock()->set_displaced_header(NULL);
769 } else {
770 CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception);
771 }
772 }
773 }
774 }
775 THREAD->clr_do_not_unlock();
776
777 // Notify jvmti
778 #ifdef VM_JVMTI
779 if (_jvmti_interp_events) {
780 // Whenever JVMTI puts a thread in interp_only_mode, method
781 // entry/exit events are sent for that thread to track stack depth.
782 if (THREAD->is_interp_only_mode()) {
783 CALL_VM(InterpreterRuntime::post_method_entry(THREAD),
784 handle_exception);
785 }
786 }
787 #endif /* VM_JVMTI */
848 // get out of here
849 //
850 if ( Bytecodes::code_at(METHOD, pc) == Bytecodes::_return_register_finalizer) {
851 // this will do the right thing even if an exception is pending.
852 goto handle_return;
853 }
854 UPDATE_PC(Bytecodes::length_at(METHOD, pc));
855 if (THREAD->has_pending_exception()) goto handle_exception;
856 goto run;
857 }
858 case got_monitors: {
859 // continue locking now that we have a monitor to use
860       // we expect to find the newly allocated monitor at the "top" of the monitor stack.
861 oop lockee = STACK_OBJECT(-1);
862 VERIFY_OOP(lockee);
863       // dereferencing lockee ought to provoke an implicit null check
864 // find a free monitor
865 BasicObjectLock* entry = (BasicObjectLock*) istate->stack_base();
866 assert(entry->obj() == NULL, "Frame manager didn't allocate the monitor");
867 entry->set_obj(lockee);
868 bool success = false;
869 uintptr_t epoch_mask_in_place = (uintptr_t)markOopDesc::epoch_mask_in_place;
870
871 markOop mark = lockee->mark();
872 intptr_t hash = (intptr_t) markOopDesc::no_hash;
873 // implies UseBiasedLocking
874 if (mark->has_bias_pattern()) {
875 uintptr_t thread_ident;
876 uintptr_t anticipated_bias_locking_value;
877 thread_ident = (uintptr_t)istate->thread();
878 anticipated_bias_locking_value =
879 (((uintptr_t)lockee->klass()->prototype_header() | thread_ident) ^ (uintptr_t)mark) &
880 ~((uintptr_t) markOopDesc::age_mask_in_place);
881
882 if (anticipated_bias_locking_value == 0) {
883 // already biased towards this thread, nothing to do
884 if (PrintBiasedLockingStatistics) {
885 (* BiasedLocking::biased_lock_entry_count_addr())++;
886 }
887 success = true;
888 } else if ((anticipated_bias_locking_value & markOopDesc::biased_lock_mask_in_place) != 0) {
889 // try revoke bias
890 markOop header = lockee->klass()->prototype_header();
891 if (hash != markOopDesc::no_hash) {
892 header = header->copy_set_hash(hash);
893 }
894 if (Atomic::cmpxchg_ptr(header, lockee->mark_addr(), mark) == mark) {
895 if (PrintBiasedLockingStatistics) {
896 (*BiasedLocking::revoked_lock_entry_count_addr())++;
897 }
898 }
899         } else if ((anticipated_bias_locking_value & epoch_mask_in_place) != 0) {
900 // try rebias
901 markOop new_header = (markOop) ( (intptr_t) lockee->klass()->prototype_header() | thread_ident);
902 if (hash != markOopDesc::no_hash) {
903 new_header = new_header->copy_set_hash(hash);
904 }
905 if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), mark) == mark) {
906 if (PrintBiasedLockingStatistics) {
907 (* BiasedLocking::rebiased_lock_entry_count_addr())++;
908 }
909 } else {
910 CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
911 }
912 success = true;
913 } else {
914 // try to bias towards thread in case object is anonymously biased
915 markOop header = (markOop) ((uintptr_t) mark & ((uintptr_t)markOopDesc::biased_lock_mask_in_place |
916 (uintptr_t)markOopDesc::age_mask_in_place | epoch_mask_in_place));
917 if (hash != markOopDesc::no_hash) {
918 header = header->copy_set_hash(hash);
919 }
920 markOop new_header = (markOop) ((uintptr_t) header | thread_ident);
921 // debugging hint
922 DEBUG_ONLY(entry->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);)
923 if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), header) == header) {
924 if (PrintBiasedLockingStatistics) {
925 (* BiasedLocking::anonymously_biased_lock_entry_count_addr())++;
926 }
927 } else {
928 CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
929 }
930 success = true;
931 }
932 }
933
934 // traditional lightweight locking
935 if (!success) {
936 markOop displaced = lockee->mark()->set_unlocked();
937 entry->lock()->set_displaced_header(displaced);
938 bool call_vm = UseHeavyMonitors;
939 if (call_vm || Atomic::cmpxchg_ptr(entry, lockee->mark_addr(), displaced) != displaced) {
940 // Is it simple recursive case?
941 if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) {
942 entry->lock()->set_displaced_header(NULL);
943 } else {
944 CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
945 }
946 }
947 }
948 UPDATE_PC_AND_TOS(1, -1);
949 goto run;
950 }
951 default: {
952 fatal("Unexpected message from frame manager");
953 }
954 }
955
956 run:
957
958 DO_UPDATE_INSTRUCTION_COUNT(*pc)
959 DEBUGGER_SINGLE_STEP_NOTIFY();
960 #ifdef PREFETCH_OPCCODE
961 opcode = *pc; /* prefetch first opcode */
962 #endif
963
964 #ifndef USELABELS
965 while (1)
966 #endif
967 {
1736
1737 /* monitorenter and monitorexit for locking/unlocking an object */
1738
1739 CASE(_monitorenter): {
1740 oop lockee = STACK_OBJECT(-1);
1741       // dereferencing lockee ought to provoke an implicit null check
1742 CHECK_NULL(lockee);
1743       // Find a free monitor, or one already allocated for this object.
1744       // If we find the object already locked once, this is a recursive
1745       // enter and it still needs its own, new monitor.
1746 BasicObjectLock* limit = istate->monitor_base();
1747 BasicObjectLock* most_recent = (BasicObjectLock*) istate->stack_base();
1748 BasicObjectLock* entry = NULL;
1749 while (most_recent != limit ) {
1750 if (most_recent->obj() == NULL) entry = most_recent;
1751 else if (most_recent->obj() == lockee) break;
1752 most_recent++;
1753 }
1754 if (entry != NULL) {
1755 entry->set_obj(lockee);
1756             bool success = false;
1757 uintptr_t epoch_mask_in_place = (uintptr_t)markOopDesc::epoch_mask_in_place;
1758
1759 markOop mark = lockee->mark();
1760 intptr_t hash = (intptr_t) markOopDesc::no_hash;
1761 // implies UseBiasedLocking
1762 if (mark->has_bias_pattern()) {
1763 uintptr_t thread_ident;
1764 uintptr_t anticipated_bias_locking_value;
1765 thread_ident = (uintptr_t)istate->thread();
1766 anticipated_bias_locking_value =
1767 (((uintptr_t)lockee->klass()->prototype_header() | thread_ident) ^ (uintptr_t)mark) &
1768 ~((uintptr_t) markOopDesc::age_mask_in_place);
1769
1770 if (anticipated_bias_locking_value == 0) {
1771 // already biased towards this thread, nothing to do
1772 if (PrintBiasedLockingStatistics) {
1773 (* BiasedLocking::biased_lock_entry_count_addr())++;
1774 }
1775 success = true;
1776 }
1777 else if ((anticipated_bias_locking_value & markOopDesc::biased_lock_mask_in_place) != 0) {
1778 // try revoke bias
1779 markOop header = lockee->klass()->prototype_header();
1780 if (hash != markOopDesc::no_hash) {
1781 header = header->copy_set_hash(hash);
1782 }
1783 if (Atomic::cmpxchg_ptr(header, lockee->mark_addr(), mark) == mark) {
1784 if (PrintBiasedLockingStatistics)
1785 (*BiasedLocking::revoked_lock_entry_count_addr())++;
1786 }
1787 }
1788             else if ((anticipated_bias_locking_value & epoch_mask_in_place) != 0) {
1789 // try rebias
1790 markOop new_header = (markOop) ( (intptr_t) lockee->klass()->prototype_header() | thread_ident);
1791 if (hash != markOopDesc::no_hash) {
1792 new_header = new_header->copy_set_hash(hash);
1793 }
1794 if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), mark) == mark) {
1795 if (PrintBiasedLockingStatistics)
1796 (* BiasedLocking::rebiased_lock_entry_count_addr())++;
1797 }
1798 else {
1799 CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
1800 }
1801 success = true;
1802 }
1803 else {
1804 // try to bias towards thread in case object is anonymously biased
1805 markOop header = (markOop) ((uintptr_t) mark & ((uintptr_t)markOopDesc::biased_lock_mask_in_place |
1806 (uintptr_t)markOopDesc::age_mask_in_place |
1807 epoch_mask_in_place));
1808 if (hash != markOopDesc::no_hash) {
1809 header = header->copy_set_hash(hash);
1810 }
1811 markOop new_header = (markOop) ((uintptr_t) header | thread_ident);
1812 // debugging hint
1813 DEBUG_ONLY(entry->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);)
1814 if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), header) == header) {
1815 if (PrintBiasedLockingStatistics)
1816 (* BiasedLocking::anonymously_biased_lock_entry_count_addr())++;
1817 }
1818 else {
1819 CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
1820 }
1821 success = true;
1822 }
1823 }
1824
1825 // traditional lightweight locking
1826 if (!success) {
1827 markOop displaced = lockee->mark()->set_unlocked();
1828 entry->lock()->set_displaced_header(displaced);
1829 bool call_vm = UseHeavyMonitors;
1830 if (call_vm || Atomic::cmpxchg_ptr(entry, lockee->mark_addr(), displaced) != displaced) {
1831 // Is it simple recursive case?
1832 if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) {
1833 entry->lock()->set_displaced_header(NULL);
1834 } else {
1835 CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
1836 }
1837 }
1838 }
1839 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
1840 } else {
1841 istate->set_msg(more_monitors);
1842 UPDATE_PC_AND_RETURN(0); // Re-execute
1843 }
1844 }
1845
1846 CASE(_monitorexit): {
1847 oop lockee = STACK_OBJECT(-1);
1848 CHECK_NULL(lockee);
1849       // dereferencing lockee ought to provoke an implicit null check
1850 // find our monitor slot
1851 BasicObjectLock* limit = istate->monitor_base();
1852 BasicObjectLock* most_recent = (BasicObjectLock*) istate->stack_base();
1853 while (most_recent != limit ) {
1854 if ((most_recent)->obj() == lockee) {
1855 BasicLock* lock = most_recent->lock();
1856 markOop header = lock->displaced_header();
1857 most_recent->set_obj(NULL);
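            // A biased mark word carries no per-acquisition state, so if the
            // object is still biased there is no displaced header to restore
            // and the exit is a no-op here.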
1858 if (!lockee->mark()->has_bias_pattern()) {
1859 bool call_vm = UseHeavyMonitors;
1860               // If it isn't recursive we must either swap the old header back or call the runtime
1861 if (header != NULL || call_vm) {
1862 if (call_vm || Atomic::cmpxchg_ptr(header, lockee->mark_addr(), lock) != lock) {
1863 // restore object for the slow case
1864 most_recent->set_obj(lockee);
1865 CALL_VM(InterpreterRuntime::monitorexit(THREAD, most_recent), handle_exception);
1866 }
1867 }
1868 }
1869 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
1870 }
1871 most_recent++;
1872 }
1873 // Need to throw illegal monitor state exception
1874 CALL_VM(InterpreterRuntime::throw_illegal_monitor_state_exception(THREAD), handle_exception);
1875 ShouldNotReachHere();
1876 }
1877
1878 /* All of the non-quick opcodes. */
1879
1880 /* -Set clobbersCpIndex true if the quickened opcode clobbers the
1881 * constant pool index in the instruction.
1882 */
1883 CASE(_getfield):
1884 CASE(_getstatic):
1885 {
1886 u2 index;
1887 ConstantPoolCacheEntry* cache;
1888 index = Bytes::get_native_u2(pc+1);
2790       // examine all the entries in reverse time (and stack) order and
2791       // unlock as we find them. If we find the method monitor before
2792       // we are at the initial entry then we should throw an exception.
2793       // It is not clear that the template-based interpreter does this
2794       // correctly.
2795
2796 BasicObjectLock* base = istate->monitor_base();
2797 BasicObjectLock* end = (BasicObjectLock*) istate->stack_base();
2798 bool method_unlock_needed = METHOD->is_synchronized();
2799       // We know the initial monitor was used for the method, so don't
2800       // check that slot in the loop.
2801 if (method_unlock_needed) base--;
2802
2803       // Check all the monitors to see that they are unlocked. Install an exception if one is found to be locked.
2804 while (end < base) {
2805 oop lockee = end->obj();
2806 if (lockee != NULL) {
2807 BasicLock* lock = end->lock();
2808 markOop header = lock->displaced_header();
2809 end->set_obj(NULL);
2810
2811 if (!lockee->mark()->has_bias_pattern()) {
2812             // If it isn't recursive we must either swap the old header back or call the runtime
2813 if (header != NULL) {
2814 if (Atomic::cmpxchg_ptr(header, lockee->mark_addr(), lock) != lock) {
2815 // restore object for the slow case
2816 end->set_obj(lockee);
2817 {
2818 // Prevent any HandleMarkCleaner from freeing our live handles
2819 HandleMark __hm(THREAD);
2820 CALL_VM_NOCHECK(InterpreterRuntime::monitorexit(THREAD, end));
2821 }
2822 }
2823 }
2824 }
2825 // One error is plenty
2826 if (illegal_state_oop() == NULL && !suppress_error) {
2827 {
2828 // Prevent any HandleMarkCleaner from freeing our live handles
2829 HandleMark __hm(THREAD);
2830 CALL_VM_NOCHECK(InterpreterRuntime::throw_illegal_monitor_state_exception(THREAD));
2831 }
2832 assert(THREAD->has_pending_exception(), "Lost our exception!");
2833 illegal_state_oop = THREAD->pending_exception();
2834 THREAD->clear_pending_exception();
2835 }
2836 }
2837 end++;
2838 }
2839 // Unlock the method if needed
2840 if (method_unlock_needed) {
2841 if (base->obj() == NULL) {
2842           // The method is already unlocked; this is not good.
2843 if (illegal_state_oop() == NULL && !suppress_error) {
2844 {
2850 illegal_state_oop = THREAD->pending_exception();
2851 THREAD->clear_pending_exception();
2852 }
2853 } else {
2854 //
2855         // The initial monitor is always used for the method.
2856         // However, if that slot no longer holds the method's oop, it was
2857         // unlocked and reused by something that was never unlocked!
2858         //
2859         // Deopt can come in with rcvr dead because C2 knows its value is
2860         // preserved in the monitor, so we can't use locals[0] at all and
2861         // must use the first monitor slot.
2862 //
2863 oop rcvr = base->obj();
2864 if (rcvr == NULL) {
2865 if (!suppress_error) {
2866 VM_JAVA_ERROR_NO_JUMP(vmSymbols::java_lang_NullPointerException(), "");
2867 illegal_state_oop = THREAD->pending_exception();
2868 THREAD->clear_pending_exception();
2869 }
2870 } else if (UseHeavyMonitors) {
2871 {
2872 // Prevent any HandleMarkCleaner from freeing our live handles.
2873 HandleMark __hm(THREAD);
2874 CALL_VM_NOCHECK(InterpreterRuntime::monitorexit(THREAD, base));
2875 }
2876 if (THREAD->has_pending_exception()) {
2877 if (!suppress_error) illegal_state_oop = THREAD->pending_exception();
2878 THREAD->clear_pending_exception();
2879 }
2880 } else {
2881 BasicLock* lock = base->lock();
2882 markOop header = lock->displaced_header();
2883 base->set_obj(NULL);
2884
2885 if (!rcvr->mark()->has_bias_pattern()) {
2887               // If it isn't recursive we must either swap the old header back or call the runtime
2888 if (header != NULL) {
2889 if (Atomic::cmpxchg_ptr(header, rcvr->mark_addr(), lock) != lock) {
2890 // restore object for the slow case
2891 base->set_obj(rcvr);
2892 {
2893 // Prevent any HandleMarkCleaner from freeing our live handles
2894 HandleMark __hm(THREAD);
2895 CALL_VM_NOCHECK(InterpreterRuntime::monitorexit(THREAD, base));
2896 }
2897 if (THREAD->has_pending_exception()) {
2898 if (!suppress_error) illegal_state_oop = THREAD->pending_exception();
2899 THREAD->clear_pending_exception();
2900 }
2901 }
2902 }
2903 }
2904 }
2905 }
2906 }
2907 }
2908 // Clear the do_not_unlock flag now.
2909 THREAD->clr_do_not_unlock();
2910
2911 //
2912 // Notify jvmti/jvmdi
2913 //
2914 // NOTE: we do not notify a method_exit if we have a pending exception,
2915 // including an exception we generate for unlocking checks. In the former
2916 // case, JVMDI has already been notified by our call for the exception handler
2917 // and in both cases as far as JVMDI is concerned we have already returned.
2918 // If we notify it again JVMDI will be all confused about how many frames
2919 // are still on the stack (4340444).
2920 //
2921 // NOTE Further! It turns out that the JVMTI spec in fact expects to see
2922 // method_exit events whenever we leave an activation unless it was done
2923 // for popframe. This is nothing like jvmdi. However we are passing the
2924 // tests at the moment (apparently because they are jvmdi based) so rather
2925 // than change this code and possibly fail tests we will leave it alone
2926 // (with this note) in anticipation of changing the vm and the tests
2927 // simultaneously.
2928
2929