src/share/vm/runtime/objectMonitor.cpp

rev 4899 : 8019973: PPC64 (part 11): Fix IA64 preprocessor conditionals on AIX.
Summary: On AIX 7.1, systemcfg.h defines IA64 unconditionally, so also test for !AIX wherever IA64 is used. Clean up the old gcc bug workaround (ATTR) and the includes in forte.cpp.


  37 #include "runtime/thread.inline.hpp"
  38 #include "services/threadService.hpp"
  39 #include "trace/tracing.hpp"
  40 #include "trace/traceMacros.hpp"
  41 #include "utilities/dtrace.hpp"
  42 #include "utilities/macros.hpp"
  43 #include "utilities/preserveException.hpp"
  44 #ifdef TARGET_OS_FAMILY_linux
  45 # include "os_linux.inline.hpp"
  46 #endif
  47 #ifdef TARGET_OS_FAMILY_solaris
  48 # include "os_solaris.inline.hpp"
  49 #endif
  50 #ifdef TARGET_OS_FAMILY_windows
  51 # include "os_windows.inline.hpp"
  52 #endif
  53 #ifdef TARGET_OS_FAMILY_bsd
  54 # include "os_bsd.inline.hpp"
  55 #endif
  56 
  57 #if defined(__GNUC__) && !defined(IA64)
  58   // Need to inhibit inlining for older versions of GCC to avoid build-time failures
  59   #define ATTR __attribute__((noinline))
  60 #else
  61   #define ATTR
  62 #endif
  63 
  64 
  65 #ifdef DTRACE_ENABLED
  66 
  67 // Only bother with this argument setup if dtrace is available
  68 // TODO-FIXME: probes should not fire when caller is _blocked.  assert() accordingly.
  69 
  70 
  71 #define DTRACE_MONITOR_PROBE_COMMON(obj, thread)                           \
  72   char* bytes = NULL;                                                      \
  73   int len = 0;                                                             \
  74   jlong jtid = SharedRuntime::get_java_tid(thread);                        \
  75   Symbol* klassname = ((oop)obj)->klass()->name();                         \
  76   if (klassname != NULL) {                                                 \
  77     bytes = (char*)klassname->bytes();                                     \
  78     len = klassname->utf8_length();                                        \
  79   }
  80 
  81 #ifndef USDT2
  82 
  83 HS_DTRACE_PROBE_DECL4(hotspot, monitor__notify,
  84   jlong, uintptr_t, char*, int);
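
For context, the common-setup macro above is consumed by the per-probe macros that follow in this file. A reconstruction of that pattern, for illustration only (the guard flag and the probe arity are inferred from the HS_DTRACE_PROBE_DECL4 declaration above):

    #define DTRACE_MONITOR_PROBE(probe, monitor, obj, thread)              \
      {                                                                    \
        if (DTraceMonitorProbes) {                                         \
          DTRACE_MONITOR_PROBE_COMMON(obj, thread);                        \
          HS_DTRACE_PROBE4(hotspot, monitor__##probe, jtid,                \
                           (uintptr_t)(monitor), bytes, len);              \
        }                                                                  \
      }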


 296 
 297 bool ObjectMonitor::try_enter(Thread* THREAD) {
 298   if (THREAD != _owner) {
 299     if (THREAD->is_lock_owned ((address)_owner)) {
 300        assert(_recursions == 0, "internal state error");
 301        _owner = THREAD ;
 302        _recursions = 1 ;
 303        OwnerIsThread = 1 ;
 304        return true;
 305     }
 306     if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) {
 307       return false;
 308     }
 309     return true;
 310   } else {
 311     _recursions++;
 312     return true;
 313   }
 314 }
 315 
 316 void ATTR ObjectMonitor::enter(TRAPS) {
 317   // The following code is ordered to check the most common cases first
 318   // and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors.
 319   Thread * const Self = THREAD ;
 320   void * cur ;
 321 
 322   cur = Atomic::cmpxchg_ptr (Self, &_owner, NULL) ;
 323   if (cur == NULL) {
 324      // Either ASSERT _recursions == 0 or explicitly set _recursions = 0.
 325      assert (_recursions == 0   , "invariant") ;
 326      assert (_owner      == Self, "invariant") ;
 327      // CONSIDER: set or assert OwnerIsThread == 1
 328      return ;
 329   }
 330 
 331   if (cur == Self) {
 332      // TODO-FIXME: check for integer overflow!  BUGID 6557169.
 333      _recursions ++ ;
 334      return ;
 335   }
 336 
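
The TODO above (BUGID 6557169) notes that the recursion counter could in principle overflow on deeply recursive reentry. A minimal standalone sketch of the kind of guard it asks for; the names are illustrative and this is not the fix HotSpot actually applied:

    #include <cstdint>

    // Toy reentrant counter: refuse to reenter rather than wrap around.
    struct ToyRecursions {
      intptr_t count = 0;
      bool reenter() {
        if (count == INTPTR_MAX) return false;  // would overflow
        ++count;
        return true;
      }
    };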


 459 
 460 int ObjectMonitor::TryLock (Thread * Self) {
 461    for (;;) {
 462       void * own = _owner ;
 463       if (own != NULL) return 0 ;
 464       if (Atomic::cmpxchg_ptr (Self, &_owner, NULL) == NULL) {
 465          // Either guarantee _recursions == 0 or set _recursions = 0.
 466          assert (_recursions == 0, "invariant") ;
 467          assert (_owner == Self, "invariant") ;
 468          // CONSIDER: set or assert that OwnerIsThread == 1
 469          return 1 ;
 470       }
 471       // The lock had been free momentarily, but we lost the race to the lock.
 472       // Interference -- the CAS failed.
 473       // We can either return -1 or retry.
  474       // Retry doesn't make as much sense because the lock was evidently just acquired by another thread.
 475       if (true) return -1 ;
 476    }
 477 }
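
As a standalone illustration of the tri-state convention TryLock implements (> 0 acquired, 0 lock busy, -1 lost the CAS race), here is a minimal sketch using std::atomic in place of Atomic::cmpxchg_ptr; the type and names are illustrative:

    #include <atomic>

    struct ToyMonitor {
      std::atomic<void*> owner{nullptr};
      // > 0: acquired; 0: already held; -1: free momentarily but we lost.
      int TryLock(void* self) {
        if (owner.load(std::memory_order_relaxed) != nullptr) {
          return 0;                              // busy, no CAS attempted
        }
        void* expected = nullptr;
        if (owner.compare_exchange_strong(expected, self,
                                          std::memory_order_acquire)) {
          return 1;                              // we now own the lock
        }
        return -1;                               // interference: CAS failed
      }
    };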
 478 
 479 void ATTR ObjectMonitor::EnterI (TRAPS) {
 480     Thread * Self = THREAD ;
 481     assert (Self->is_Java_thread(), "invariant") ;
 482     assert (((JavaThread *) Self)->thread_state() == _thread_blocked   , "invariant") ;
 483 
 484     // Try the lock - TATAS
 485     if (TryLock (Self) > 0) {
 486         assert (_succ != Self              , "invariant") ;
 487         assert (_owner == Self             , "invariant") ;
 488         assert (_Responsible != Self       , "invariant") ;
 489         return ;
 490     }
 491 
 492     DeferredInitialize () ;
 493 
 494     // We try one round of spinning *before* enqueueing Self.
 495     //
 496     // If the _owner is ready but OFFPROC we could use a YieldTo()
 497     // operation to donate the remainder of this thread's quantum
 498     // to the owner.  This has subtle but beneficial affinity
 499     // effects.


 708     //
 709     // Critically, any prior STs to _succ or EntryList must be visible before
 710     // the ST of null into _owner in the *subsequent* (following) corresponding
  711     // monitorexit.  Recall, too, that in 1-0 mode monitorexit does not necessarily
 712     // execute a serializing instruction.
 713 
 714     if (SyncFlags & 8) {
 715        OrderAccess::fence() ;
 716     }
 717     return ;
 718 }
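
The visibility rule stated above (prior stores to _succ or EntryList must be visible before the store of null into _owner) can be expressed in portable form with a release store. A standalone sketch under illustrative names, not HotSpot's actual fields:

    #include <atomic>

    namespace toy {
      std::atomic<void*> succ{nullptr};
      std::atomic<void*> owner{nullptr};

      void exit_lock(void* heir) {
        succ.store(heir, std::memory_order_relaxed);  // prior ST to succ
        // The release store orders the store above before owner is
        // cleared, the same discipline the fence enforces in 1-0 exit.
        owner.store(nullptr, std::memory_order_release);
      }
    }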
 719 
 720 // ReenterI() is a specialized inline form of the latter half of the
 721 // contended slow-path from EnterI().  We use ReenterI() only for
 722 // monitor reentry in wait().
 723 //
 724 // In the future we should reconcile EnterI() and ReenterI(), adding
 725 // Knob_Reset and Knob_SpinAfterFutile support and restructuring the
 726 // loop accordingly.
 727 
 728 void ATTR ObjectMonitor::ReenterI (Thread * Self, ObjectWaiter * SelfNode) {
 729     assert (Self != NULL                , "invariant") ;
 730     assert (SelfNode != NULL            , "invariant") ;
 731     assert (SelfNode->_thread == Self   , "invariant") ;
 732     assert (_waiters > 0                , "invariant") ;
 733     assert (((oop)(object()))->mark() == markOopDesc::encode(this) , "invariant") ;
 734     assert (((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant") ;
 735     JavaThread * jt = (JavaThread *) Self ;
 736 
 737     int nWakeups = 0 ;
 738     for (;;) {
 739         ObjectWaiter::TStates v = SelfNode->TState ;
 740         guarantee (v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant") ;
 741         assert    (_owner != Self, "invariant") ;
 742 
 743         if (TryLock (Self) > 0) break ;
 744         if (TrySpin (Self) > 0) break ;
 745 
 746         TEVENT (Wait Reentry - parking) ;
 747 
 748         // State transition wrappers around park() ...


 913 //
 914 // The CAS() in enter provides for safety and exclusion, while the CAS or
 915 // MEMBAR in exit provides for progress and avoids stranding.  1-0 locking
  916 // eliminates the CAS/MEMBAR from the exit path, but it admits stranding.
  917 // We detect and recover from stranding with timers.
  918 //
  919 // If a thread transiently strands, it'll park until (a) another
  920 // thread acquires the lock and then drops the lock, at which time the
  921 // exiting thread will notice and unpark the stranded thread, or (b)
  922 // the timer expires.  If the lock is high traffic then the stranding latency
  923 // will be low due to (a).  If the lock is low traffic then the odds of
  924 // stranding are lower, although the worst-case stranding latency
  925 // is longer.  Critically, we don't want to put excessive load on the
  926 // platform's timer subsystem.  We want to minimize both the timer injection
  927 // rate (timers created/sec) as well as the number of timers active at
  928 // any one time.  (More precisely, we want to minimize timer-seconds, which is
  929 // the integral of the number of active timers at any instant over time.)
 930 // Both impinge on OS scalability.  Given that, at most one thread parked on
 931 // a monitor will use a timer.
 932 
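
To make the at-most-one-timer policy concrete, the following standalone sketch elects a single timed waiter per lock (the analogue of _Responsible) while every other contender parks untimed; the timeout bounds worst-case stranding latency without multiplying timer-subsystem load. All names are illustrative, with std::condition_variable standing in for park/unpark:

    #include <atomic>
    #include <chrono>
    #include <condition_variable>
    #include <mutex>

    struct ToyLock {
      std::atomic<void*> owner{nullptr};
      std::atomic<void*> responsible{nullptr};
      std::mutex m;
      std::condition_variable cv;

      void contended_enter(void* self) {
        void* none = nullptr;
        // Elect at most one timed ("responsible") waiter per lock.
        bool timed = responsible.compare_exchange_strong(none, self);
        std::unique_lock<std::mutex> lk(m);
        for (;;) {
          void* expected = nullptr;
          if (owner.compare_exchange_strong(expected, self)) break;
          if (timed) {
            cv.wait_for(lk, std::chrono::milliseconds(1));  // timed park
          } else {
            cv.wait(lk);  // untimed park: no timer-subsystem load
          }
        }
        if (timed) responsible.store(nullptr);
      }

      void release() {
        owner.store(nullptr, std::memory_order_release);
        std::lock_guard<std::mutex> g(m);  // closes the wakeup race window
        cv.notify_one();                   // successor wakeup (unpark analog)
      }
    };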
 933 void ATTR ObjectMonitor::exit(bool not_suspended, TRAPS) {
 934    Thread * Self = THREAD ;
 935    if (THREAD != _owner) {
 936      if (THREAD->is_lock_owned((address) _owner)) {
 937        // Transmute _owner from a BasicLock pointer to a Thread address.
 938        // We don't need to hold _mutex for this transition.
 939        // Non-null to Non-null is safe as long as all readers can
 940        // tolerate either flavor.
 941        assert (_recursions == 0, "invariant") ;
 942        _owner = THREAD ;
 943        _recursions = 0 ;
 944        OwnerIsThread = 1 ;
 945      } else {
 946        // NOTE: we need to handle unbalanced monitor enter/exit
 947        // in native code by throwing an exception.
 948        // TODO: Throw an IllegalMonitorStateException ?
 949        TEVENT (Exit - Throw IMSX) ;
 950        assert(false, "Non-balanced monitor enter/exit!");
 951        if (false) {
 952           THROW(vmSymbols::java_lang_IllegalMonitorStateException());
 953        }




  37 #include "runtime/thread.inline.hpp"
  38 #include "services/threadService.hpp"
  39 #include "trace/tracing.hpp"
  40 #include "trace/traceMacros.hpp"
  41 #include "utilities/dtrace.hpp"
  42 #include "utilities/macros.hpp"
  43 #include "utilities/preserveException.hpp"
  44 #ifdef TARGET_OS_FAMILY_linux
  45 # include "os_linux.inline.hpp"
  46 #endif
  47 #ifdef TARGET_OS_FAMILY_solaris
  48 # include "os_solaris.inline.hpp"
  49 #endif
  50 #ifdef TARGET_OS_FAMILY_windows
  51 # include "os_windows.inline.hpp"
  52 #endif
  53 #ifdef TARGET_OS_FAMILY_bsd
  54 # include "os_bsd.inline.hpp"
  55 #endif
  56 
  57 #ifdef DTRACE_ENABLED
  58 
  59 // Only bother with this argument setup if dtrace is available
  60 // TODO-FIXME: probes should not fire when caller is _blocked.  assert() accordingly.
  61 
  62 
  63 #define DTRACE_MONITOR_PROBE_COMMON(obj, thread)                           \
  64   char* bytes = NULL;                                                      \
  65   int len = 0;                                                             \
  66   jlong jtid = SharedRuntime::get_java_tid(thread);                        \
  67   Symbol* klassname = ((oop)obj)->klass()->name();                         \
  68   if (klassname != NULL) {                                                 \
  69     bytes = (char*)klassname->bytes();                                     \
  70     len = klassname->utf8_length();                                        \
  71   }
  72 
  73 #ifndef USDT2
  74 
  75 HS_DTRACE_PROBE_DECL4(hotspot, monitor__notify,
  76   jlong, uintptr_t, char*, int);


 288 
 289 bool ObjectMonitor::try_enter(Thread* THREAD) {
 290   if (THREAD != _owner) {
 291     if (THREAD->is_lock_owned ((address)_owner)) {
 292        assert(_recursions == 0, "internal state error");
 293        _owner = THREAD ;
 294        _recursions = 1 ;
 295        OwnerIsThread = 1 ;
 296        return true;
 297     }
 298     if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) {
 299       return false;
 300     }
 301     return true;
 302   } else {
 303     _recursions++;
 304     return true;
 305   }
 306 }
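
A hypothetical caller sketch, not an actual HotSpot call site: try_enter() serves as a non-blocking probe, with the blocking enter() path as the fallback. do_work() is a placeholder, and the thread is passed as the trailing TRAPS argument per HotSpot convention:

    void locked_region(ObjectMonitor* mon, Thread* THREAD) {
      if (!mon->try_enter(THREAD)) {
        mon->enter(THREAD);   // contended slow path; may park
      }
      do_work();              // placeholder for the guarded work
      mon->exit(true /* not_suspended */, THREAD);
    }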
 307 
 308 void ObjectMonitor::enter(TRAPS) {
 309   // The following code is ordered to check the most common cases first
 310   // and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors.
 311   Thread * const Self = THREAD ;
 312   void * cur ;
 313 
 314   cur = Atomic::cmpxchg_ptr (Self, &_owner, NULL) ;
 315   if (cur == NULL) {
 316      // Either ASSERT _recursions == 0 or explicitly set _recursions = 0.
 317      assert (_recursions == 0   , "invariant") ;
 318      assert (_owner      == Self, "invariant") ;
 319      // CONSIDER: set or assert OwnerIsThread == 1
 320      return ;
 321   }
 322 
 323   if (cur == Self) {
 324      // TODO-FIXME: check for integer overflow!  BUGID 6557169.
 325      _recursions ++ ;
 326      return ;
 327   }
 328 


 451 
 452 int ObjectMonitor::TryLock (Thread * Self) {
 453    for (;;) {
 454       void * own = _owner ;
 455       if (own != NULL) return 0 ;
 456       if (Atomic::cmpxchg_ptr (Self, &_owner, NULL) == NULL) {
 457          // Either guarantee _recursions == 0 or set _recursions = 0.
 458          assert (_recursions == 0, "invariant") ;
 459          assert (_owner == Self, "invariant") ;
 460          // CONSIDER: set or assert that OwnerIsThread == 1
 461          return 1 ;
 462       }
 463       // The lock had been free momentarily, but we lost the race to the lock.
 464       // Interference -- the CAS failed.
 465       // We can either return -1 or retry.
  466       // Retry doesn't make as much sense because the lock was evidently just acquired by another thread.
 467       if (true) return -1 ;
 468    }
 469 }
 470 
 471 void ObjectMonitor::EnterI (TRAPS) {
 472     Thread * Self = THREAD ;
 473     assert (Self->is_Java_thread(), "invariant") ;
 474     assert (((JavaThread *) Self)->thread_state() == _thread_blocked   , "invariant") ;
 475 
 476     // Try the lock - TATAS
 477     if (TryLock (Self) > 0) {
 478         assert (_succ != Self              , "invariant") ;
 479         assert (_owner == Self             , "invariant") ;
 480         assert (_Responsible != Self       , "invariant") ;
 481         return ;
 482     }
 483 
 484     DeferredInitialize () ;
 485 
 486     // We try one round of spinning *before* enqueueing Self.
 487     //
 488     // If the _owner is ready but OFFPROC we could use a YieldTo()
 489     // operation to donate the remainder of this thread's quantum
 490     // to the owner.  This has subtle but beneficial affinity
 491     // effects.


 700     //
 701     // Critically, any prior STs to _succ or EntryList must be visible before
 702     // the ST of null into _owner in the *subsequent* (following) corresponding
  703     // monitorexit.  Recall, too, that in 1-0 mode monitorexit does not necessarily
 704     // execute a serializing instruction.
 705 
 706     if (SyncFlags & 8) {
 707        OrderAccess::fence() ;
 708     }
 709     return ;
 710 }
 711 
 712 // ReenterI() is a specialized inline form of the latter half of the
 713 // contended slow-path from EnterI().  We use ReenterI() only for
 714 // monitor reentry in wait().
 715 //
 716 // In the future we should reconcile EnterI() and ReenterI(), adding
 717 // Knob_Reset and Knob_SpinAfterFutile support and restructuring the
 718 // loop accordingly.
 719 
 720 void ObjectMonitor::ReenterI (Thread * Self, ObjectWaiter * SelfNode) {
 721     assert (Self != NULL                , "invariant") ;
 722     assert (SelfNode != NULL            , "invariant") ;
 723     assert (SelfNode->_thread == Self   , "invariant") ;
 724     assert (_waiters > 0                , "invariant") ;
 725     assert (((oop)(object()))->mark() == markOopDesc::encode(this) , "invariant") ;
 726     assert (((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant") ;
 727     JavaThread * jt = (JavaThread *) Self ;
 728 
 729     int nWakeups = 0 ;
 730     for (;;) {
 731         ObjectWaiter::TStates v = SelfNode->TState ;
 732         guarantee (v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant") ;
 733         assert    (_owner != Self, "invariant") ;
 734 
 735         if (TryLock (Self) > 0) break ;
 736         if (TrySpin (Self) > 0) break ;
 737 
 738         TEVENT (Wait Reentry - parking) ;
 739 
 740         // State transition wrappers around park() ...


 905 //
 906 // The CAS() in enter provides for safety and exclusion, while the CAS or
 907 // MEMBAR in exit provides for progress and avoids stranding.  1-0 locking
  908 // eliminates the CAS/MEMBAR from the exit path, but it admits stranding.
  909 // We detect and recover from stranding with timers.
  910 //
  911 // If a thread transiently strands, it'll park until (a) another
  912 // thread acquires the lock and then drops the lock, at which time the
  913 // exiting thread will notice and unpark the stranded thread, or (b)
  914 // the timer expires.  If the lock is high traffic then the stranding latency
  915 // will be low due to (a).  If the lock is low traffic then the odds of
  916 // stranding are lower, although the worst-case stranding latency
  917 // is longer.  Critically, we don't want to put excessive load on the
  918 // platform's timer subsystem.  We want to minimize both the timer injection
  919 // rate (timers created/sec) as well as the number of timers active at
  920 // any one time.  (More precisely, we want to minimize timer-seconds, which is
  921 // the integral of the number of active timers at any instant over time.)
 922 // Both impinge on OS scalability.  Given that, at most one thread parked on
 923 // a monitor will use a timer.
 924 
 925 void ObjectMonitor::exit(bool not_suspended, TRAPS) {
 926    Thread * Self = THREAD ;
 927    if (THREAD != _owner) {
 928      if (THREAD->is_lock_owned((address) _owner)) {
 929        // Transmute _owner from a BasicLock pointer to a Thread address.
 930        // We don't need to hold _mutex for this transition.
 931        // Non-null to Non-null is safe as long as all readers can
 932        // tolerate either flavor.
 933        assert (_recursions == 0, "invariant") ;
 934        _owner = THREAD ;
 935        _recursions = 0 ;
 936        OwnerIsThread = 1 ;
 937      } else {
 938        // NOTE: we need to handle unbalanced monitor enter/exit
 939        // in native code by throwing an exception.
 940        // TODO: Throw an IllegalMonitorStateException ?
 941        TEVENT (Exit - Throw IMSX) ;
 942        assert(false, "Non-balanced monitor enter/exit!");
 943        if (false) {
 944           THROW(vmSymbols::java_lang_IllegalMonitorStateException());
 945        }