809 // ESelf was previously on the WaitSet but we just unlinked it above
810 // because of a timeout. ESelf is not resident on any list and is not OnDeck
811 assert(_OnDeck != ESelf, "invariant");
812 ILock(Self);
813 } else {
814 // A prior notify() operation moved ESelf from the WaitSet to the cxq.
815 // ESelf is now on the cxq, EntryList or at the OnDeck position.
816 // The following fragment is extracted from Monitor::ILock()
817 for (;;) {
818 if (OrderAccess::load_acquire(&_OnDeck) == ESelf && TrySpin(Self)) break;
819 ParkCommon(ESelf, 0);
820 }
821 assert(_OnDeck == ESelf, "invariant");
822 _OnDeck = NULL;
823 }
824
825 assert(ILocked(), "invariant");
826 return WasOnWaitSet != 0; // return true IFF timeout
827 }
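// Illustrative aside (not part of the original file): how a caller typically consumes
// the "true IFF timeout" convention that IWait() establishes and that Monitor::wait()
// below propagates. SomeState_lock, condition_satisfied() and the 100 ms bound are
// hypothetical names used only for this sketch.
//
//   MutexLockerEx ml(SomeState_lock);
//   while (!condition_satisfied()) {
//     bool timed_out = SomeState_lock->wait(/* no_safepoint_check= */ false, 100 /* ms */);
//     if (timed_out) break;   // gave up waiting; the monitor is re-held on return either way
//   }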
828
829
830 // ON THE VMTHREAD SNEAKING PAST HELD LOCKS:
831 // In particular, there are certain types of global lock that may be held
832 // by a Java thread while it is blocked at a safepoint but before it has
833 // written the _owner field. These locks may be sneakily acquired by the
834 // VM thread during a safepoint to avoid deadlocks. Alternatively, one should
835 // identify all such locks, and ensure that Java threads never block at
836 // safepoints while holding them (_no_safepoint_check_flag). While it
837 // seems as though this could increase the time to reach a safepoint
838 // (or at least increase the mean, if not the variance), the latter
839 // approach might make for a cleaner, more maintainable JVM design.
840 //
841 // Sneaking is vile and reprehensible and should be excised at the 1st
842 // opportunity. It's possible that the need for sneaking could be obviated
843 // as follows. Currently, a thread might (a) while TBIVM, call pthread_mutex_lock
844 // or ILock() thus acquiring the "physical" lock underlying Monitor/Mutex.
845 // (b) stall at the TBIVM exit point as a safepoint is in effect. Critically,
846 // it'll stall at the TBIVM reentry state transition after having acquired the
847 // underlying lock, but before having set _owner and having entered the actual
848 // critical section. The lock-sneaking facility leverages that fact and allows the
849 // VM thread to logically acquire locks that have already been physically locked by mutators,
850 // but where the mutators are known to be blocked at the reentry thread-state transition.
851 //
852 // If we were to modify the Monitor-Mutex so that TBIVM state transitions tightly
853 // wrapped calls to park(), then we could likely do away with sneaking. We'd
854 // decouple lock acquisition and parking. The critical invariant for eliminating
855 // sneaking is to ensure that we never "physically" acquire the lock while TBIVM.
856 // An easy way to accomplish this is to wrap the park calls in a narrow TBIVM jacket.
857 // One difficulty with this approach is that the TBIVM wrapper could recurse and
858 // call lock() deep from within a lock() call, while the MutexEvent was already enqueued.
859 // Using a stack (N=2 at minimum) of ParkEvents would take care of that problem.
860 //
861 // But of course the proper ultimate approach is to avoid schemes that require explicit
862 // sneaking or dependence on any clever invariants or subtle implementation properties
863 // of Mutex-Monitor and instead directly address the underlying design flaw.
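// Illustrative sketch only (hypothetical, not part of the original file): the shape lock()
// might take if, as suggested above, parking were wrapped in a narrow TBIVM jacket so the
// "physical" lock is never acquired while TBIVM and sneaking becomes unnecessary.
// ILockParkTBIVM() stands in for a hypothetical ILock() variant whose internal
// ParkCommon() calls are bracketed by a ThreadBlockInVM scope.
//
//   void Monitor::lock(Thread * Self) {
//     if (TryFast()) {                  // uncontended fast path: no parking, no TBIVM
//       set_owner(Self);
//       return;
//     }
//     ILockParkTBIVM(Self);             // only the park() calls inside run while TBIVM
//     assert(ILocked() && owner() == NULL, "invariant");
//     set_owner(Self);                  // _owner is written outside any TBIVM window
//   }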
864
865 void Monitor::lock(Thread * Self) {
866 // Ensure that the Monitor requires/allows safepoint checks.
867 assert(_safepoint_check_required != Monitor::_safepoint_check_never,
868 "This lock should never have a safepoint check: %s", name());
869
870 #ifdef CHECK_UNHANDLED_OOPS
871 // Clear unhandled oops so we get a crash right away. Only clear for non-vm
872 // or GC threads.
873 if (Self->is_Java_thread()) {
874 Self->clear_unhandled_oops();
875 }
876 #endif // CHECK_UNHANDLED_OOPS
877
878 debug_only(check_prelock_state(Self, StrictSafepointChecks));
879 assert(_owner != Self, "invariant");
880 assert(_OnDeck != Self->_MutexEvent, "invariant");
881
882 if (TryFast()) {
883 Exeunt:
884 assert(ILocked(), "invariant");
885 assert(owner() == NULL, "invariant");
886 set_owner(Self);
887 return;
888 }
909 ThreadBlockInVM tbivm((JavaThread *) Self);
910 ILock(Self);
911 } else {
912 // Mirabile dictu ("wonderful to relate"): non-Java threads skip the thread-state transition
913 ILock(Self);
914 }
915 goto Exeunt;
916 }
917
918 void Monitor::lock() {
919 this->lock(Thread::current());
920 }
921
922 // Lock without safepoint check - a degenerate variant of lock().
923 // Should ONLY be used by safepoint code and other code
924 // that is guaranteed not to block while running inside the VM. If this is called with
925 // thread state set to be in VM, the safepoint synchronization code will deadlock!
926
927 void Monitor::lock_without_safepoint_check(Thread * Self) {
928 // Ensure that the Monitor does not require or allow safepoint checks.
929 assert(_safepoint_check_required != Monitor::_safepoint_check_always,
930 "This lock should always have a safepoint check: %s", name());
931 assert(_owner != Self, "invariant");
932 ILock(Self);
933 assert(_owner == NULL, "invariant");
934 set_owner(Self);
935 }
936
937 void Monitor::lock_without_safepoint_check() {
938 lock_without_safepoint_check(Thread::current());
939 }
940
941
942 // Returns true if thread succeeds in grabbing the lock, otherwise false.
943
944 bool Monitor::try_lock() {
945 Thread * const Self = Thread::current();
946 debug_only(check_prelock_state(Self, false));
947 // assert(!thread->is_inside_signal_handler(), "don't lock inside signal handler");
948
949 // Special case, where all Java threads are stopped.
950 // The lock may have been acquired but _owner is not yet set.
1040 assert(_OnDeck == ESelf, "invariant");
1041 _OnDeck = NULL;
1042 ParkEvent::Release(ESelf); // surrender the ParkEvent
1043 goto Exeunt;
1044 }
1045
1046 void Monitor::jvm_raw_unlock() {
1047 // Nearly the same as Monitor::unlock(), except that we
1048 // set _owner to NULL directly instead of going through set_owner(NULL)
1049 _owner = NULL;
1050 if (_snuck) { // ???
1051 assert(SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread(), "sneak");
1052 _snuck = false;
1053 return;
1054 }
1055 IUnlock(false);
1056 }
1057
1058 bool Monitor::wait(bool no_safepoint_check, long timeout,
1059 bool as_suspend_equivalent) {
1060 // Make sure safepoint checking is used properly.
1061 assert(!(_safepoint_check_required == Monitor::_safepoint_check_never && no_safepoint_check == false),
1062 "This lock should never have a safepoint check: %s", name());
1063 assert(!(_safepoint_check_required == Monitor::_safepoint_check_always && no_safepoint_check == true),
1064 "This lock should always have a safepoint check: %s", name());
1065
1066 Thread * const Self = Thread::current();
1067 assert(_owner == Self, "invariant");
1068 assert(ILocked(), "invariant");
1069
1070 // as_suspend_equivalent logically implies !no_safepoint_check
1071 guarantee(!as_suspend_equivalent || !no_safepoint_check, "invariant");
1072 // !no_safepoint_check logically implies java_thread
1073 guarantee(no_safepoint_check || Self->is_Java_thread(), "invariant");
1074
1075 #ifdef ASSERT
1076 Monitor * least = get_least_ranked_lock_besides_this(Self->owned_locks());
1077 assert(least != this, "Specification of get_least_... call above");
1078 if (least != NULL && least->rank() <= special) {
1079 tty->print("Attempting to wait on monitor %s/%d while holding"
1080 " lock %s/%d -- possible deadlock",
1081 name(), rank(), least->name(), least->rank());
1082 assert(false, "Shouldn't block(wait) while holding a lock of rank special");
1083 }
1084 #endif // ASSERT
1085
1086 int wait_status;
1087 // conceptually set the owner to NULL in anticipation of
1088 // abdicating the lock in wait
1089 set_owner(NULL);
1090 if (no_safepoint_check) {
1091 wait_status = IWait(Self, timeout);
1092 } else {
1093 assert(Self->is_Java_thread(), "invariant");
1094 JavaThread *jt = (JavaThread *)Self;
1095
1096 // Enter safepoint region - ornate and Rococo ...
1097 ThreadBlockInVM tbivm(jt);
1098 OSThreadWaitState osts(Self->osthread(), false /* not Object.wait() */);
1099
1100 if (as_suspend_equivalent) {
1101 jt->set_suspend_equivalent();
1102 // cleared by handle_special_suspend_equivalent_condition() or
1103 // java_suspend_self()
1104 }
1105
1106 wait_status = IWait(Self, timeout);
1107
1108 // were we externally suspended while we were waiting?
1109 if (as_suspend_equivalent && jt->handle_special_suspend_equivalent_condition()) {
1110 // Our event wait has finished and we own the lock, but
809 // ESelf was previously on the WaitSet but we just unlinked it above
810 // because of a timeout. ESelf is not resident on any list and is not OnDeck
811 assert(_OnDeck != ESelf, "invariant");
812 ILock(Self);
813 } else {
814 // A prior notify() operation moved ESelf from the WaitSet to the cxq.
815 // ESelf is now on the cxq, EntryList or at the OnDeck position.
816 // The following fragment is extracted from Monitor::ILock()
817 for (;;) {
818 if (OrderAccess::load_acquire(&_OnDeck) == ESelf && TrySpin(Self)) break;
819 ParkCommon(ESelf, 0);
820 }
821 assert(_OnDeck == ESelf, "invariant");
822 _OnDeck = NULL;
823 }
824
825 assert(ILocked(), "invariant");
826 return WasOnWaitSet != 0; // return true IFF timeout
827 }
828
829 #ifndef PRODUCT
830 void Monitor::check_safepoint_state(Thread* Self, bool do_safepoint_check) {
831 // A safepoint-checking acquisition must not use a lock created with _safepoint_check_never, and a
832 // no-safepoint-check acquisition must not use _safepoint_check_always; _safepoint_check_sometimes allows either.
833 SafepointCheckRequired not_allowed = do_safepoint_check ?
834 Monitor::_safepoint_check_never :
835 Monitor::_safepoint_check_always;
836 assert(!Self->is_Java_thread() || _safepoint_check_required != not_allowed,
837 "This lock should %s have a safepoint check: %s",
838 _safepoint_check_required ? "always" : "never", name());
839 }
840 #endif
841
842 // ON THE VMTHREAD SNEAKING PAST HELD LOCKS:
843 // In particular, there are certain types of global lock that may be held
844 // by a Java thread while it is blocked at a safepoint but before it has
845 // written the _owner field. These locks may be sneakily acquired by the
846 // VM thread during a safepoint to avoid deadlocks. Alternatively, one should
847 // identify all such locks, and ensure that Java threads never block at
848 // safepoints while holding them (_no_safepoint_check_flag). While it
849 // seems as though this could increase the time to reach a safepoint
850 // (or at least increase the mean, if not the variance), the latter
851 // approach might make for a cleaner, more maintainable JVM design.
852 //
853 // Sneaking is vile and reprehensible and should be excised at the 1st
854 // opportunity. It's possible that the need for sneaking could be obviated
855 // as follows. Currently, a thread might (a) while TBIVM, call pthread_mutex_lock
856 // or ILock() thus acquiring the "physical" lock underlying Monitor/Mutex.
857 // (b) stall at the TBIVM exit point as a safepoint is in effect. Critically,
858 // it'll stall at the TBIVM reentry state transition after having acquired the
859 // underlying lock, but before having set _owner and having entered the actual
860 // critical section. The lock-sneaking facility leverages that fact and allows the
861 // VM thread to logically acquire locks that have already been physically locked by mutators,
862 // but where the mutators are known to be blocked at the reentry thread-state transition.
863 //
864 // If we were to modify the Monitor-Mutex so that TBIVM state transitions tightly
865 // wrapped calls to park(), then we could likely do away with sneaking. We'd
866 // decouple lock acquisition and parking. The critical invariant for eliminating
867 // sneaking is to ensure that we never "physically" acquire the lock while TBIVM.
868 // An easy way to accomplish this is to wrap the park calls in a narrow TBIVM jacket.
869 // One difficulty with this approach is that the TBIVM wrapper could recurse and
870 // call lock() deep from within a lock() call, while the MutexEvent was already enqueued.
871 // Using a stack (N=2 at minimum) of ParkEvents would take care of that problem.
872 //
873 // But of course the proper ultimate approach is to avoid schemes that require explicit
874 // sneaking or dependence on any clever invariants or subtle implementation properties
875 // of Mutex-Monitor and instead directly address the underlying design flaw.
876
877 void Monitor::lock(Thread * Self) {
878 // Ensure that the Monitor requires/allows safepoint checks.
879 check_safepoint_state(Self, true);
880
881 #ifdef CHECK_UNHANDLED_OOPS
882 // Clear unhandled oops so we get a crash right away. Only clear for non-vm
883 // or GC threads.
884 if (Self->is_Java_thread()) {
885 Self->clear_unhandled_oops();
886 }
887 #endif // CHECK_UNHANDLED_OOPS
888
889 debug_only(check_prelock_state(Self, StrictSafepointChecks));
890 assert(_owner != Self, "invariant");
891 assert(_OnDeck != Self->_MutexEvent, "invariant");
892
893 if (TryFast()) {
894 Exeunt:
895 assert(ILocked(), "invariant");
896 assert(owner() == NULL, "invariant");
897 set_owner(Self);
898 return;
899 }
920 ThreadBlockInVM tbivm((JavaThread *) Self);
921 ILock(Self);
922 } else {
924 // Mirabile dictu ("wonderful to relate"): non-Java threads skip the thread-state transition
924 ILock(Self);
925 }
926 goto Exeunt;
927 }
928
929 void Monitor::lock() {
930 this->lock(Thread::current());
931 }
932
933 // Lock without safepoint check - a degenerate variant of lock().
934 // Should ONLY be used by safepoint code and other code
935 // that is guaranteed not to block while running inside the VM. If this is called with
936 // thread state set to be in VM, the safepoint synchronization code will deadlock!
937
938 void Monitor::lock_without_safepoint_check(Thread * Self) {
939 // Ensure that the Monitor does not require or allow safepoint checks.
940 check_safepoint_state(Self, false);
941 assert(_owner != Self, "invariant");
942 ILock(Self);
943 assert(_owner == NULL, "invariant");
944 set_owner(Self);
945 }
946
947 void Monitor::lock_without_safepoint_check() {
948 lock_without_safepoint_check(Thread::current());
949 }
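// Illustrative usage (not part of the original file): the no-safepoint-check path is normally
// reached through MutexLockerEx with Mutex::_no_safepoint_check_flag, on a lock created as
// _safepoint_check_never. SomeVMInternal_lock is a hypothetical lock name.
//
//   {
//     MutexLockerEx ml(SomeVMInternal_lock, Mutex::_no_safepoint_check_flag);
//     // ... touch state guarded by SomeVMInternal_lock; must not block while _thread_in_vm ...
//   }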
950
951
952 // Returns true if thread succeeds in grabbing the lock, otherwise false.
953
954 bool Monitor::try_lock() {
955 Thread * const Self = Thread::current();
956 debug_only(check_prelock_state(Self, false));
957 // assert(!thread->is_inside_signal_handler(), "don't lock inside signal handler");
958
959 // Special case, where all Java threads are stopped.
960 // The lock may have been acquired but _owner is not yet set.
1050 assert(_OnDeck == ESelf, "invariant");
1051 _OnDeck = NULL;
1052 ParkEvent::Release(ESelf); // surrender the ParkEvent
1053 goto Exeunt;
1054 }
1055
1056 void Monitor::jvm_raw_unlock() {
1057 // Nearly the same as Monitor::unlock(), except that we
1058 // set _owner to NULL directly instead of going through set_owner(NULL)
1059 _owner = NULL;
1060 if (_snuck) { // ???
1061 assert(SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread(), "sneak");
1062 _snuck = false;
1063 return;
1064 }
1065 IUnlock(false);
1066 }
1067
1068 bool Monitor::wait(bool no_safepoint_check, long timeout,
1069 bool as_suspend_equivalent) {
1070 Thread * const Self = Thread::current();
1071
1072 // Make sure safepoint checking is used properly.
1073 check_safepoint_state(Self, !no_safepoint_check);
1074 assert(_owner == Self, "invariant");
1075 assert(ILocked(), "invariant");
1076
1077 // as_suspend_equivalent logically implies !no_safepoint_check
1078 guarantee(!as_suspend_equivalent || !no_safepoint_check, "invariant");
1079 // !no_safepoint_check logically implies java_thread
1080 guarantee(no_safepoint_check || Self->is_Java_thread(), "invariant");
1081
1082 #ifdef ASSERT
1083 Monitor * least = get_least_ranked_lock_besides_this(Self->owned_locks());
1084 assert(least != this, "Specification of get_least_... call above");
1085 if (least != NULL && least->rank() <= special) {
1086 tty->print("Attempting to wait on monitor %s/%d while holding"
1087 " lock %s/%d -- possible deadlock",
1088 name(), rank(), least->name(), least->rank());
1089 assert(false, "Shouldn't block(wait) while holding a lock of rank special");
1090 }
1091 #endif // ASSERT
1092
1093 int wait_status;
1094 // conceptually set the owner to NULL in anticipation of
1095 // abdicating the lock in wait
1096 set_owner(NULL);
1097 if (no_safepoint_check || !Self->is_Java_thread()) {
1098 wait_status = IWait(Self, timeout);
1099 } else {
1100 assert(Self->is_Java_thread(), "invariant");
1101 JavaThread *jt = (JavaThread *)Self;
1102
1103 // Enter safepoint region - ornate and Rococo ...
1104 ThreadBlockInVM tbivm(jt);
1105 OSThreadWaitState osts(Self->osthread(), false /* not Object.wait() */);
1106
1107 if (as_suspend_equivalent) {
1108 jt->set_suspend_equivalent();
1109 // cleared by handle_special_suspend_equivalent_condition() or
1110 // java_suspend_self()
1111 }
1112
1113 wait_status = IWait(Self, timeout);
1114
1115 // were we externally suspended while we were waiting?
1116 if (as_suspend_equivalent && jt->handle_special_suspend_equivalent_condition()) {
1117 // Our event wait has finished and we own the lock, but