
src/share/vm/runtime/objectMonitor.cpp

rev 10456: 8151593: Cleanup definition/usage of INLINE/NOINLINE macros and add xlC support
Contributed-by: matthias.baesken@sap.com


  27 #include "memory/resourceArea.hpp"
  28 #include "oops/markOop.hpp"
  29 #include "oops/oop.inline.hpp"
  30 #include "runtime/atomic.inline.hpp"
  31 #include "runtime/handles.inline.hpp"
  32 #include "runtime/interfaceSupport.hpp"
  33 #include "runtime/mutexLocker.hpp"
  34 #include "runtime/objectMonitor.hpp"
  35 #include "runtime/objectMonitor.inline.hpp"
  36 #include "runtime/orderAccess.inline.hpp"
  37 #include "runtime/osThread.hpp"
  38 #include "runtime/stubRoutines.hpp"
  39 #include "runtime/thread.inline.hpp"
  40 #include "services/threadService.hpp"
  41 #include "trace/tracing.hpp"
  42 #include "trace/traceMacros.hpp"
  43 #include "utilities/dtrace.hpp"
  44 #include "utilities/macros.hpp"
  45 #include "utilities/preserveException.hpp"
  46 
  47 #if defined(__GNUC__) && !defined(IA64) && !defined(PPC64)
  48 // Need to inhibit inlining for older versions of GCC to avoid build-time failures
  49   #define NOINLINE __attribute__((noinline))
  50 #else
  51   #define NOINLINE
  52 #endif
  53 
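The changeset named in the header (8151593) replaces ad-hoc per-file definitions like the one above with a shared, compiler-keyed macro. Purely as an illustration -- the guards and spellings below are assumptions, not the actual patch -- a centralized definition might look like:

    // Hypothetical centralized NOINLINE (sketch, not the real 8151593 patch).
    // xlC accepts the GCC-style attribute; MSVC uses __declspec; any other
    // compiler falls back to an empty macro.
    #if defined(__GNUC__) || defined(__IBMCPP__)   // GCC/Clang and IBM xlC
      #define NOINLINE __attribute__((noinline))
    #elif defined(_MSC_VER)                        // Microsoft Visual C++
      #define NOINLINE __declspec(noinline)
    #else
      #define NOINLINE
    #endif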
  54 
  55 #ifdef DTRACE_ENABLED
  56 
  57 // Only bother with this argument setup if dtrace is available
  58 // TODO-FIXME: probes should not fire when caller is _blocked.  assert() accordingly.
  59 
  60 
  61 #define DTRACE_MONITOR_PROBE_COMMON(obj, thread)                           \
  62   char* bytes = NULL;                                                      \
  63   int len = 0;                                                             \
  64   jlong jtid = SharedRuntime::get_java_tid(thread);                        \
  65   Symbol* klassname = ((oop)obj)->klass()->name();                         \
  66   if (klassname != NULL) {                                                 \
  67     bytes = (char*)klassname->bytes();                                     \
  68     len = klassname->utf8_length();                                        \
  69   }
  70 
  71 #define DTRACE_MONITOR_WAIT_PROBE(monitor, obj, thread, millis)            \
  72   {                                                                        \
  73     if (DTraceMonitorProbes) {                                             \
  74       DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \


 237 //   unpark the notifyee.  Unparking a notifyee in notify() is inefficient -
 238 //   it's likely the notifyee would simply impale itself on the lock held
 239 //   by the notifier.
 240 //
 241 // * An interesting alternative is to encode cxq as (List,LockByte) where
 242 //   the LockByte is 0 iff the monitor is owned.  _owner is simply an auxiliary
 243 //   variable, like _recursions, in the scheme.  The threads or Events that form
 244 //   the list would have to be aligned in 256-byte addresses.  A thread would
 245 //   try to acquire the lock or enqueue itself with CAS, but exiting threads
 246 //   could use a 1-0 protocol and simply STB to set the LockByte to 0.
 247 //   Note that this is *not* word-tearing, but it does presume that full-word
 248 //   CAS operations remain coherent when intermixed with STB operations.  That's true
 249 //   on most common processors.
 250 //
 251 // * See also http://blogs.sun.com/dave
 252 
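To make the (List,LockByte) idea above concrete, here is a hedged sketch of the encoding it describes. None of these names exist in this file; they are illustrative assumptions drawn directly from the comment:

    // Illustrative sketch of the (List,LockByte) encoding. With list nodes
    // aligned on 256-byte boundaries, the low 8 bits of the word are free
    // to act as the lock byte: 0 means unowned, nonzero means owned.
    typedef volatile intptr_t LockWord;  // (list head << 8) | lock byte

    static bool try_acquire(LockWord * lw) {
      intptr_t v = *lw;
      if ((v & 0xFF) != 0) return false;              // already owned
      // CAS in the owned bit, preserving the list-head bits.
      return Atomic::cmpxchg_ptr(v | 1, lw, v) == v;
    }

    static void release(LockWord * lw) {
      // 1-0 exit: a single byte store (STB) clears the lock byte. As the
      // comment notes, this is not word-tearing, but it assumes full-word
      // CAS stays coherent when intermixed with byte stores.
      *reinterpret_cast<volatile char *>(lw) = 0;     // low byte; assumes little-endian
    }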
 253 
 254 // -----------------------------------------------------------------------------
 255 // Enter support
 256 
 257 void NOINLINE ObjectMonitor::enter(TRAPS) {
 258   // The following code is ordered to check the most common cases first
 259   // and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors.
 260   Thread * const Self = THREAD;
 261 
 262   void * cur = Atomic::cmpxchg_ptr (Self, &_owner, NULL);
 263   if (cur == NULL) {
 264     // Either ASSERT _recursions == 0 or explicitly set _recursions = 0.
 265     assert(_recursions == 0, "invariant");
 266     assert(_owner == Self, "invariant");
 267     return;
 268   }
 269 
 270   if (cur == Self) {
 271     // TODO-FIXME: check for integer overflow!  BUGID 6557169.
 272     _recursions++;
 273     return;
 274   }
 275 
 276   if (Self->is_lock_owned ((address)cur)) {
 277     assert(_recursions == 0, "internal state error");


 414 // Callers must compensate as needed.
 415 
 416 int ObjectMonitor::TryLock(Thread * Self) {
 417   void * own = _owner;
 418   if (own != NULL) return 0;
 419   if (Atomic::cmpxchg_ptr (Self, &_owner, NULL) == NULL) {
 420     // Either guarantee _recursions == 0 or set _recursions = 0.
 421     assert(_recursions == 0, "invariant");
 422     assert(_owner == Self, "invariant");
 423     return 1;
 424   }
 425   // The lock had been free momentarily, but we lost the race to the lock.
 426   // Interference -- the CAS failed.
 427   // We can either return -1 or retry.
 428   // Retry doesn't make as much sense because another thread just acquired the lock.
 429   return -1;
 430 }
 431 
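TryLock()'s tri-state return (1 = acquired, 0 = owner was already non-null, -1 = lost the CAS race) lets callers treat interference as a hint to stop spinning. A hypothetical caller, purely to illustrate the convention:

    // Hypothetical caller sketch (not from this file): spin a bounded
    // number of times, but give up early on -1, since losing the CAS means
    // the lock was free a moment ago and someone else just took it.
    int spins = 100;                  // illustrative bound
    while (spins-- > 0) {
      int status = TryLock(Self);
      if (status > 0) return;         // acquired the lock; done
      if (status < 0) break;          // interference; enqueue instead
      SpinPause();                    // still owned; brief pause, then recheck
    }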
 432 #define MAX_RECHECK_INTERVAL 1000
 433 
 434 void NOINLINE ObjectMonitor::EnterI(TRAPS) {
 435   Thread * const Self = THREAD;
 436   assert(Self->is_Java_thread(), "invariant");
 437   assert(((JavaThread *) Self)->thread_state() == _thread_blocked, "invariant");
 438 
 439   // Try the lock - TATAS (test-and-test-and-set)
 440   if (TryLock (Self) > 0) {
 441     assert(_succ != Self, "invariant");
 442     assert(_owner == Self, "invariant");
 443     assert(_Responsible != Self, "invariant");
 444     return;
 445   }
 446 
 447   DeferredInitialize();
 448 
 449   // We try one round of spinning *before* enqueueing Self.
 450   //
 451   // If the _owner is ready but OFFPROC we could use a YieldTo()
 452   // operation to donate the remainder of this thread's quantum
 453   // to the owner.  This has subtle but beneficial affinity
 454   // effects.


 664   //
 665   // Critically, any prior STs to _succ or EntryList must be visible before
 666   // the ST of null into _owner in the *subsequent* (following) corresponding
 667   // monitorexit.  Recall, too, that in 1-0 mode monitorexit does not necessarily
 668   // execute a serializing instruction.
 669 
 670   if (SyncFlags & 8) {
 671     OrderAccess::fence();
 672   }
 673   return;
 674 }
 675 
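The ordering constraint described above is conventionally expressed as a release store of _owner followed by a store-load barrier. A schematic sketch only, using HotSpot's OrderAccess names; the real exit path does considerably more:

    // Sketch of the 1-0 exit ordering: all prior stores (to _succ,
    // EntryList, cxq) must be visible before _owner is cleared, and the
    // subsequent re-read of the queues must not float above the release.
    OrderAccess::release_store_ptr(&_owner, NULL);  // drop the lock
    OrderAccess::storeload();                       // ST(_owner) / LD(queues) barrier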
 676 // ReenterI() is a specialized inline form of the latter half of the
 677 // contended slow-path from EnterI().  We use ReenterI() only for
 678 // monitor reentry in wait().
 679 //
 680 // In the future we should reconcile EnterI() and ReenterI(), adding
 681 // Knob_Reset and Knob_SpinAfterFutile support and restructuring the
 682 // loop accordingly.
 683 
 684 void NOINLINE ObjectMonitor::ReenterI(Thread * Self, ObjectWaiter * SelfNode) {
 685   assert(Self != NULL, "invariant");
 686   assert(SelfNode != NULL, "invariant");
 687   assert(SelfNode->_thread == Self, "invariant");
 688   assert(_waiters > 0, "invariant");
 689   assert(((oop)(object()))->mark() == markOopDesc::encode(this), "invariant");
 690   assert(((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant");
 691   JavaThread * jt = (JavaThread *) Self;
 692 
 693   int nWakeups = 0;
 694   for (;;) {
 695     ObjectWaiter::TStates v = SelfNode->TState;
 696     guarantee(v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant");
 697     assert(_owner != Self, "invariant");
 698 
 699     if (TryLock(Self) > 0) break;
 700     if (TrySpin(Self) > 0) break;
 701 
 702     TEVENT(Wait Reentry - parking);
 703 
 704     // State transition wrappers around park() ...


 877 // If a thread transiently strands it'll park until (a) another
 878 // thread acquires the lock and then drops the lock, at which time the
 879 // exiting thread will notice and unpark the stranded thread, or, (b)
 880 // the timer expires.  If the lock is high traffic then the stranding latency
 881 // will be low due to (a).  If the lock is low traffic then the odds of
 882 // stranding are lower, although the worst-case stranding latency
 883 // is longer.  Critically, we don't want to put excessive load on the
 884 // platform's timer subsystem.  We want to minimize both the timer injection
 885 // rate (timers created/sec) as well as the number of timers active at
 886 // any one time.  (More precisely, we want to minimize timer-seconds, which is
 887 // the integral over time of the number of active timers.)
 888 // Both impinge on OS scalability.  Given that, at most one thread parked on
 889 // a monitor will use a timer.
 890 //
 891 // There is also the risk of a futile wake-up. If we drop the lock
 892 // another thread can reacquire the lock immediately, and we can
 893 // then wake a thread unnecessarily. This is benign, and we've
 894 // structured the code so the windows are short and the frequency
 895 // of such futile wakeups is low.
 896 
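The "at most one timer per monitor" policy is realized by letting only the designated _Responsible thread park with a timeout, growing the recheck interval geometrically up to MAX_RECHECK_INTERVAL. A hedged sketch of that shape; the real EnterI() interleaves this with queueing and other bookkeeping:

    // Sketch of the timed-park backoff used by the _Responsible thread,
    // per the discussion above. Only this one thread arms a timer; all
    // other contenders park indefinitely and wait to be unparked.
    int recheckInterval = 1;                        // milliseconds
    for (;;) {
      if (TryLock(Self) > 0) break;
      if (_Responsible == Self) {
        Self->_ParkEvent->park((jlong) recheckInterval);
        recheckInterval *= 8;                       // geometric backoff,
        if (recheckInterval > MAX_RECHECK_INTERVAL) {
          recheckInterval = MAX_RECHECK_INTERVAL;   // clamped at 1000 ms
        }
      } else {
        Self->_ParkEvent->park();                   // untimed park
      }
    }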
 897 void NOINLINE ObjectMonitor::exit(bool not_suspended, TRAPS) {
 898   Thread * const Self = THREAD;
 899   if (THREAD != _owner) {
 900     if (THREAD->is_lock_owned((address) _owner)) {
 901       // Transmute _owner from a BasicLock pointer to a Thread address.
 902       // We don't need to hold _mutex for this transition.
 903       // Non-null to Non-null is safe as long as all readers can
 904       // tolerate either flavor.
 905       assert(_recursions == 0, "invariant");
 906       _owner = THREAD;
 907       _recursions = 0;
 908     } else {
 909       // Apparent unbalanced locking ...
 910       // Naively we'd like to throw IllegalMonitorStateException.
 911       // As a practical matter we can neither allocate nor throw an
 912       // exception as ::exit() can be called from leaf routines.
 913       // see x86_32.ad Fast_Unlock() and the I1 and I2 properties.
 914       // Upon deeper reflection, however, in a properly run JVM the only
 915       // way we should encounter this situation is in the presence of
 916       // unbalanced JNI locking. TODO: CheckJNICalls.
 917       // See also: CR4414101




  27 #include "memory/resourceArea.hpp"
  28 #include "oops/markOop.hpp"
  29 #include "oops/oop.inline.hpp"
  30 #include "runtime/atomic.inline.hpp"
  31 #include "runtime/handles.inline.hpp"
  32 #include "runtime/interfaceSupport.hpp"
  33 #include "runtime/mutexLocker.hpp"
  34 #include "runtime/objectMonitor.hpp"
  35 #include "runtime/objectMonitor.inline.hpp"
  36 #include "runtime/orderAccess.inline.hpp"
  37 #include "runtime/osThread.hpp"
  38 #include "runtime/stubRoutines.hpp"
  39 #include "runtime/thread.inline.hpp"
  40 #include "services/threadService.hpp"
  41 #include "trace/tracing.hpp"
  42 #include "trace/traceMacros.hpp"
  43 #include "utilities/dtrace.hpp"
  44 #include "utilities/macros.hpp"
  45 #include "utilities/preserveException.hpp"
  46 
  47 #ifdef DTRACE_ENABLED
  48 
  49 // Only bother with this argument setup if dtrace is available
  50 // TODO-FIXME: probes should not fire when caller is _blocked.  assert() accordingly.
  51 
  52 
  53 #define DTRACE_MONITOR_PROBE_COMMON(obj, thread)                           \
  54   char* bytes = NULL;                                                      \
  55   int len = 0;                                                             \
  56   jlong jtid = SharedRuntime::get_java_tid(thread);                        \
  57   Symbol* klassname = ((oop)obj)->klass()->name();                         \
  58   if (klassname != NULL) {                                                 \
  59     bytes = (char*)klassname->bytes();                                     \
  60     len = klassname->utf8_length();                                        \
  61   }
  62 
  63 #define DTRACE_MONITOR_WAIT_PROBE(monitor, obj, thread, millis)            \
  64   {                                                                        \
  65     if (DTraceMonitorProbes) {                                             \
  66       DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \


 229 //   unpark the notifyee.  Unparking a notifyee in notify() is inefficient -
 230 //   it's likely the notifyee would simply impale itself on the lock held
 231 //   by the notifier.
 232 //
 233 // * An interesting alternative is to encode cxq as (List,LockByte) where
 234 //   the LockByte is 0 iff the monitor is owned.  _owner is simply an auxiliary
 235 //   variable, like _recursions, in the scheme.  The threads or Events that form
 236 //   the list would have to be aligned in 256-byte addresses.  A thread would
 237 //   try to acquire the lock or enqueue itself with CAS, but exiting threads
 238 //   could use a 1-0 protocol and simply STB to set the LockByte to 0.
 239 //   Note that this is *not* word-tearing, but it does presume that full-word
 240 //   CAS operations remain coherent when intermixed with STB operations.  That's true
 241 //   on most common processors.
 242 //
 243 // * See also http://blogs.sun.com/dave
 244 
 245 
 246 // -----------------------------------------------------------------------------
 247 // Enter support
 248 
 249 void ObjectMonitor::enter(TRAPS) {
 250   // The following code is ordered to check the most common cases first
 251   // and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors.
 252   Thread * const Self = THREAD;
 253 
 254   void * cur = Atomic::cmpxchg_ptr (Self, &_owner, NULL);
 255   if (cur == NULL) {
 256     // Either ASSERT _recursions == 0 or explicitly set _recursions = 0.
 257     assert(_recursions == 0, "invariant");
 258     assert(_owner == Self, "invariant");
 259     return;
 260   }
 261 
 262   if (cur == Self) {
 263     // TODO-FIXME: check for integer overflow!  BUGID 6557169.
 264     _recursions++;
 265     return;
 266   }
 267 
 268   if (Self->is_lock_owned ((address)cur)) {
 269     assert(_recursions == 0, "internal state error");


 406 // Callers must compensate as needed.
 407 
 408 int ObjectMonitor::TryLock(Thread * Self) {
 409   void * own = _owner;
 410   if (own != NULL) return 0;
 411   if (Atomic::cmpxchg_ptr (Self, &_owner, NULL) == NULL) {
 412     // Either guarantee _recursions == 0 or set _recursions = 0.
 413     assert(_recursions == 0, "invariant");
 414     assert(_owner == Self, "invariant");
 415     return 1;
 416   }
 417   // The lock had been free momentarily, but we lost the race to the lock.
 418   // Interference -- the CAS failed.
 419   // We can either return -1 or retry.
 420   // Retry doesn't make as much sense because another thread just acquired the lock.
 421   return -1;
 422 }
 423 
 424 #define MAX_RECHECK_INTERVAL 1000
 425 
 426 void ObjectMonitor::EnterI(TRAPS) {
 427   Thread * const Self = THREAD;
 428   assert(Self->is_Java_thread(), "invariant");
 429   assert(((JavaThread *) Self)->thread_state() == _thread_blocked, "invariant");
 430 
 431   // Try the lock - TATAS (test-and-test-and-set)
 432   if (TryLock (Self) > 0) {
 433     assert(_succ != Self, "invariant");
 434     assert(_owner == Self, "invariant");
 435     assert(_Responsible != Self, "invariant");
 436     return;
 437   }
 438 
 439   DeferredInitialize();
 440 
 441   // We try one round of spinning *before* enqueueing Self.
 442   //
 443   // If the _owner is ready but OFFPROC we could use a YieldTo()
 444   // operation to donate the remainder of this thread's quantum
 445   // to the owner.  This has subtle but beneficial affinity
 446   // effects.


 656   //
 657   // Critically, any prior STs to _succ or EntryList must be visible before
 658   // the ST of null into _owner in the *subsequent* (following) corresponding
 659   // monitorexit.  Recall, too, that in 1-0 mode monitorexit does not necessarily
 660   // execute a serializing instruction.
 661 
 662   if (SyncFlags & 8) {
 663     OrderAccess::fence();
 664   }
 665   return;
 666 }
 667 
 668 // ReenterI() is a specialized inline form of the latter half of the
 669 // contended slow-path from EnterI().  We use ReenterI() only for
 670 // monitor reentry in wait().
 671 //
 672 // In the future we should reconcile EnterI() and ReenterI(), adding
 673 // Knob_Reset and Knob_SpinAfterFutile support and restructuring the
 674 // loop accordingly.
 675 
 676 void ObjectMonitor::ReenterI(Thread * Self, ObjectWaiter * SelfNode) {
 677   assert(Self != NULL, "invariant");
 678   assert(SelfNode != NULL, "invariant");
 679   assert(SelfNode->_thread == Self, "invariant");
 680   assert(_waiters > 0, "invariant");
 681   assert(((oop)(object()))->mark() == markOopDesc::encode(this), "invariant");
 682   assert(((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant");
 683   JavaThread * jt = (JavaThread *) Self;
 684 
 685   int nWakeups = 0;
 686   for (;;) {
 687     ObjectWaiter::TStates v = SelfNode->TState;
 688     guarantee(v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant");
 689     assert(_owner != Self, "invariant");
 690 
 691     if (TryLock(Self) > 0) break;
 692     if (TrySpin(Self) > 0) break;
 693 
 694     TEVENT(Wait Reentry - parking);
 695 
 696     // State transition wrappers around park() ...


 869 // If a thread transiently strands it'll park until (a) another
 870 // thread acquires the lock and then drops the lock, at which time the
 871 // exiting thread will notice and unpark the stranded thread, or, (b)
 872 // the timer expires.  If the lock is high traffic then the stranding latency
 873 // will be low due to (a).  If the lock is low traffic then the odds of
 874 // stranding are lower, although the worst-case stranding latency
 875 // is longer.  Critically, we don't want to put excessive load on the
 876 // platform's timer subsystem.  We want to minimize both the timer injection
 877 // rate (timers created/sec) as well as the number of timers active at
 878 // any one time.  (More precisely, we want to minimize timer-seconds, which is
 879 // the integral over time of the number of active timers.)
 880 // Both impinge on OS scalability.  Given that, at most one thread parked on
 881 // a monitor will use a timer.
 882 //
 883 // There is also the risk of a futile wake-up. If we drop the lock
 884 // another thread can reacquire the lock immediately, and we can
 885 // then wake a thread unnecessarily. This is benign, and we've
 886 // structured the code so the windows are short and the frequency
 887 // of such futile wakeups is low.
 888 
 889 void ObjectMonitor::exit(bool not_suspended, TRAPS) {
 890   Thread * const Self = THREAD;
 891   if (THREAD != _owner) {
 892     if (THREAD->is_lock_owned((address) _owner)) {
 893       // Transmute _owner from a BasicLock pointer to a Thread address.
 894       // We don't need to hold _mutex for this transition.
 895       // Non-null to Non-null is safe as long as all readers can
 896       // tolerate either flavor.
 897       assert(_recursions == 0, "invariant");
 898       _owner = THREAD;
 899       _recursions = 0;
 900     } else {
 901       // Apparent unbalanced locking ...
 902       // Naively we'd like to throw IllegalMonitorStateException.
 903       // As a practical matter we can neither allocate nor throw an
 904       // exception as ::exit() can be called from leaf routines.
 905       // see x86_32.ad Fast_Unlock() and the I1 and I2 properties.
 906       // Upon deeper reflection, however, in a properly run JVM the only
 907       // way we should encounter this situation is in the presence of
 908       // unbalanced JNI locking. TODO: CheckJNICalls.
 909       // See also: CR4414101

