< prev index next >

src/hotspot/share/runtime/synchronizer.cpp

Print this page




// Per-slot spin locks; presumably used to serialize racing inflation
// attempts — usage is not visible in this chunk, confirm against the
// inflate() implementation.
static volatile intptr_t gInflationLocks[NINFLATIONLOCKS];

// global list of blocks of monitors
PaddedEnd<ObjectMonitor> * volatile ObjectSynchronizer::gBlockList = NULL;
// global monitor free list
ObjectMonitor * volatile ObjectSynchronizer::gFreeList  = NULL;
// global monitor in-use list, for moribund threads,
// monitors they inflated need to be scanned for deflation
ObjectMonitor * volatile ObjectSynchronizer::gOmInUseList  = NULL;
// count of entries in gOmInUseList
int ObjectSynchronizer::gOmInUseCount = 0;

static volatile intptr_t gListLock = 0;      // protects global monitor lists
static volatile int gMonitorFreeCount  = 0;  // # on gFreeList
static volatile int gMonitorPopulation = 0;  // # Extant -- in circulation

// Forward declaration: posts the JFR JavaMonitorInflate event for a
// completed inflation (definition appears later in this file).
static void post_monitor_inflate_event(EventJavaMonitorInflate&,
                                       const oop,
                                       const ObjectSynchronizer::InflateCause);
 130 













// Sentinel oop value (all bits set); presumably marks the end/link of a
// chain in the monitor block list — usage not visible in this chunk.
#define CHAINMARKER (cast_to_oop<intptr_t>(-1))
 132 
 133 
 134 // =====================> Quick functions
 135 
 136 // The quick_* forms are special fast-path variants used to improve
 137 // performance.  In the simplest case, a "quick_*" implementation could
 138 // simply return false, in which case the caller will perform the necessary
 139 // state transitions and call the slow-path form.
 140 // The fast-path is designed to handle frequently arising cases in an efficient
 141 // manner and is just a degenerate "optimistic" variant of the slow-path.
 142 // returns true  -- to indicate the call was satisfied.
 143 // returns false -- to indicate the call needs the services of the slow-path.
 144 // A no-loitering ordinance is in effect for code in the quick_* family
 145 // operators: safepoints or indefinite blocking (blocking that might span a
 146 // safepoint) are forbidden. Generally the thread_state() is _in_Java upon
 147 // entry.
 148 //
 149 // Consider: An interesting optimization is to have the JIT recognize the
 150 // following common idiom:
 151 //   synchronized (someobj) { .... ; notify(); }
 152 // That is, we find a notify() or notifyAll() call that immediately precedes
 153 // the monitorexit operation.  In that case the JIT could fuse the operations
 154 // into a single notifyAndExit() runtime primitive.
 155 
 156 bool ObjectSynchronizer::quick_notify(oopDesc * obj, Thread * self, bool all) {
 157   assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
 158   assert(self->is_Java_thread(), "invariant");
 159   assert(((JavaThread *) self)->thread_state() == _thread_in_Java, "invariant");
 160   NoSafepointVerifier nsv;
 161   if (obj == NULL) return false;  // slow-path for invalid obj

 162   const markOop mark = obj->mark();
 163 
 164   if (mark->has_locker() && self->is_lock_owned((address)mark->locker())) {
 165     // Degenerate notify
 166     // stack-locked by caller so by definition the implied waitset is empty.
 167     return true;
 168   }
 169 
 170   if (mark->has_monitor()) {
 171     ObjectMonitor * const mon = mark->monitor();
 172     assert(mon->object() == obj, "invariant");
 173     if (mon->owner() != self) return false;  // slow-path for IMS exception
 174 
 175     if (mon->first_waiter() != NULL) {
 176       // We have one or more waiters. Since this is an inflated monitor
 177       // that we own, we can transfer one or more threads from the waitset
 178       // to the entrylist here and now, avoiding the slow-path.
 179       if (all) {
 180         DTRACE_MONITOR_PROBE(notifyAll, mon, obj, self);
 181       } else {


 192   }
 193 
 194   // biased locking and any other IMS exception states take the slow-path
 195   return false;
 196 }
 197 
 198 
 199 // The LockNode emitted directly at the synchronization site would have
 200 // been too big if it were to have included support for the cases of inflated
 201 // recursive enter and exit, so they go here instead.
 202 // Note that we can't safely call AsyncPrintJavaStack() from within
 203 // quick_enter() as our thread state remains _in_Java.
 204 
 205 bool ObjectSynchronizer::quick_enter(oop obj, Thread * Self,
 206                                      BasicLock * lock) {
 207   assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
 208   assert(Self->is_Java_thread(), "invariant");
 209   assert(((JavaThread *) Self)->thread_state() == _thread_in_Java, "invariant");
 210   NoSafepointVerifier nsv;
 211   if (obj == NULL) return false;       // Need to throw NPE

 212   const markOop mark = obj->mark();
 213 
 214   if (mark->has_monitor()) {
 215     ObjectMonitor * const m = mark->monitor();
 216     assert(m->object() == obj, "invariant");
 217     Thread * const owner = (Thread *) m->_owner;
 218 
 219     // Lock contention and Transactional Lock Elision (TLE) diagnostics
 220     // and observability
 221     // Case: light contention possibly amenable to TLE
 222     // Case: TLE inimical operations such as nested/recursive synchronization
 223 
 224     if (owner == Self) {
 225       m->_recursions++;
 226       return true;
 227     }
 228 
 229     // This Java Monitor is inflated so obj's header will never be
 230     // displaced to this thread's BasicLock. Make the displaced header
 231     // non-NULL so this BasicLock is not seen as recursive nor as


 247 
 248   // Note that we could inflate in quick_enter.
 249   // This is likely a useful optimization
 250   // Critically, in quick_enter() we must not:
 251   // -- perform bias revocation, or
 252   // -- block indefinitely, or
 253   // -- reach a safepoint
 254 
 255   return false;        // revert to slow-path
 256 }
 257 
 258 // -----------------------------------------------------------------------------
 259 //  Fast Monitor Enter/Exit
 260 // This the fast monitor enter. The interpreter and compiler use
 261 // some assembly copies of this code. Make sure update those code
 262 // if the following function is changed. The implementation is
 263 // extremely sensitive to race condition. Be careful.
 264 
// Fast-path monitor enter. Attempts biased-locking resolution first; if the
// bias is (re)acquired we are done, otherwise control falls through to
// slow_enter() for stack-locking / inflation. Kept byte-for-byte in sync
// with the assembly copies used by the interpreter and compilers.
void ObjectSynchronizer::fast_enter(Handle obj, BasicLock* lock,
                                    bool attempt_rebias, TRAPS) {
  if (UseBiasedLocking) {
    if (!SafepointSynchronize::is_at_safepoint()) {
      // Java-thread path: revocation may itself block and reach a safepoint.
      BiasedLocking::Condition cond = BiasedLocking::revoke_and_rebias(obj, attempt_rebias, THREAD);
      if (cond == BiasedLocking::BIAS_REVOKED_AND_REBIASED) {
        // Lock acquired by rebiasing; no displaced header is needed.
        return;
      }
    } else {
      // At-safepoint path (VM thread): may only revoke, never rebias.
      assert(!attempt_rebias, "can not rebias toward VM thread");
      BiasedLocking::revoke_at_safepoint(obj);
    }
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  slow_enter(obj, lock, THREAD);
}
 282 
 283 void ObjectSynchronizer::fast_exit(oop object, BasicLock* lock, TRAPS) {
 284   markOop mark = object->mark();

 285   // We cannot check for Biased Locking if we are racing an inflation.
 286   assert(mark == markOopDesc::INFLATING() ||
 287          !mark->has_bias_pattern(), "should not see bias pattern here");
 288 
 289   markOop dhw = lock->displaced_header();
 290   if (dhw == NULL) {
 291     // If the displaced header is NULL, then this exit matches up with
 292     // a recursive enter. No real work to do here except for diagnostics.
 293 #ifndef PRODUCT
 294     if (mark != markOopDesc::INFLATING()) {
 295       // Only do diagnostics if we are not racing an inflation. Simply
 296       // exiting a recursive enter of a Java Monitor that is being
 297       // inflated is safe; see the has_monitor() comment below.
 298       assert(!mark->is_neutral(), "invariant");
 299       assert(!mark->has_locker() ||
 300              THREAD->is_lock_owned((address)mark->locker()), "invariant");
 301       if (mark->has_monitor()) {
 302         // The BasicLock's displaced_header is marked as a recursive
 303         // enter and we have an inflated Java Monitor (ObjectMonitor).
 304         // This is a special case where the Java Monitor was inflated


 321     // swing the displaced header from the BasicLock back to the mark.
 322     assert(dhw->is_neutral(), "invariant");
 323     if (object->cas_set_mark(dhw, mark) == mark) {
 324       TEVENT(fast_exit: release stack-lock);
 325       return;
 326     }
 327   }
 328 
 329   // We have to take the slow-path of possible inflation and then exit.
 330   ObjectSynchronizer::inflate(THREAD,
 331                               object,
 332                               inflate_cause_vm_internal)->exit(true, THREAD);
 333 }
 334 
 335 // -----------------------------------------------------------------------------
 336 // Interpreter/Compiler Slow Case
 337 // This routine is used to handle interpreter/compiler slow case
 338 // We don't need to use fast path here, because it must have been
 339 // failed in the interpreter/compiler code.
 340 void ObjectSynchronizer::slow_enter(Handle obj, BasicLock* lock, TRAPS) {

 341   markOop mark = obj->mark();
 342   assert(!mark->has_bias_pattern(), "should not see bias pattern here");
 343 
 344   if (mark->is_neutral()) {
 345     // Anticipate successful CAS -- the ST of the displaced mark must
 346     // be visible <= the ST performed by the CAS.
 347     lock->set_displaced_header(mark);
 348     if (mark == obj()->cas_set_mark((markOop) lock, mark)) {
 349       TEVENT(slow_enter: release stacklock);
 350       return;
 351     }
 352     // Fall through to inflate() ...
 353   } else if (mark->has_locker() &&
 354              THREAD->is_lock_owned((address)mark->locker())) {
 355     assert(lock != mark->locker(), "must not re-lock the same lock");
 356     assert(lock != (BasicLock*)obj->mark(), "don't relock with same BasicLock");
 357     lock->set_displaced_header(NULL);
 358     return;
 359   }
 360 


 373 // failed in the interpreter/compiler code. Simply use the heavy
 374 // weight monitor should be ok, unless someone find otherwise.
void ObjectSynchronizer::slow_exit(oop object, BasicLock* lock, TRAPS) {
  // fast_exit() already covers every exit case (recursive stack-lock,
  // displaced-header CAS, inflation race, inflated monitor), so the slow
  // path simply delegates to it.
  fast_exit(object, lock, THREAD);
}
 378 
 379 // -----------------------------------------------------------------------------
 380 // Class Loader  support to workaround deadlocks on the class loader lock objects
 381 // Also used by GC
 382 // complete_exit()/reenter() are used to wait on a nested lock
 383 // i.e. to give up an outer lock completely and then re-enter
 384 // Used when holding nested locks - lock acquisition order: lock1 then lock2
 385 //  1) complete_exit lock1 - saving recursion count
 386 //  2) wait on lock2
 387 //  3) when notified on lock2, unlock lock2
 388 //  4) reenter lock1 with original recursion count
 389 //  5) lock lock2
 390 // NOTE: must use heavy weight monitor to handle complete_exit/reenter()
 391 intptr_t ObjectSynchronizer::complete_exit(Handle obj, TRAPS) {
 392   TEVENT(complete_exit);

 393   if (UseBiasedLocking) {
 394     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
 395     assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
 396   }
 397 
 398   ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD,
 399                                                        obj(),
 400                                                        inflate_cause_vm_internal);
 401 
 402   return monitor->complete_exit(THREAD);
 403 }
 404 
 405 // NOTE: must use heavy weight monitor to handle complete_exit/reenter()
 406 void ObjectSynchronizer::reenter(Handle obj, intptr_t recursion, TRAPS) {
 407   TEVENT(reenter);

 408   if (UseBiasedLocking) {
 409     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
 410     assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
 411   }
 412 
 413   ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD,
 414                                                        obj(),
 415                                                        inflate_cause_vm_internal);
 416 
 417   monitor->reenter(recursion, THREAD);
 418 }
 419 // -----------------------------------------------------------------------------
 420 // JNI locks on java objects
 421 // NOTE: must use heavy weight monitor to handle jni monitor enter
 422 void ObjectSynchronizer::jni_enter(Handle obj, TRAPS) {
 423   // the current locking is from JNI instead of Java code
 424   TEVENT(jni_enter);

 425   if (UseBiasedLocking) {
 426     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
 427     assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
 428   }
 429   THREAD->set_current_pending_monitor_is_from_java(false);
 430   ObjectSynchronizer::inflate(THREAD, obj(), inflate_cause_jni_enter)->enter(THREAD);
 431   THREAD->set_current_pending_monitor_is_from_java(true);
 432 }
 433 
 434 // NOTE: must use heavy weight monitor to handle jni monitor exit
 435 void ObjectSynchronizer::jni_exit(oop obj, Thread* THREAD) {
 436   TEVENT(jni_exit);

 437   if (UseBiasedLocking) {
 438     Handle h_obj(THREAD, obj);
 439     BiasedLocking::revoke_and_rebias(h_obj, false, THREAD);
 440     obj = h_obj();
 441   }
 442   assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
 443 
 444   ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD,
 445                                                        obj,
 446                                                        inflate_cause_jni_exit);
 447   // If this thread has locked the object, exit the monitor.  Note:  can't use
 448   // monitor->check(CHECK); must exit even if an exception is pending.
 449   if (monitor->check(THREAD)) {
 450     monitor->exit(true, THREAD);
 451   }
 452 }
 453 
 454 // -----------------------------------------------------------------------------
 455 // Internal VM locks on java objects
 456 // standard constructor, allows locking failures


 461   _obj = obj;
 462 
 463   if (_dolock) {
 464     TEVENT(ObjectLocker);
 465 
 466     ObjectSynchronizer::fast_enter(_obj, &_lock, false, _thread);
 467   }
 468 }
 469 
 470 ObjectLocker::~ObjectLocker() {
 471   if (_dolock) {
 472     ObjectSynchronizer::fast_exit(_obj(), &_lock, _thread);
 473   }
 474 }
 475 
 476 
 477 // -----------------------------------------------------------------------------
 478 //  Wait/Notify/NotifyAll
 479 // NOTE: must use heavy weight monitor to handle wait()
 480 int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {

 481   if (UseBiasedLocking) {
 482     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
 483     assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
 484   }
 485   if (millis < 0) {
 486     TEVENT(wait - throw IAX);
 487     THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
 488   }
 489   ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD,
 490                                                        obj(),
 491                                                        inflate_cause_wait);
 492 
 493   DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), THREAD, millis);
 494   monitor->wait(millis, true, THREAD);
 495 
 496   // This dummy call is in place to get around dtrace bug 6254741.  Once
 497   // that's fixed we can uncomment the following line, remove the call
 498   // and change this function back into a "void" func.
 499   // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
 500   return dtrace_waited_probe(monitor, obj, THREAD);
 501 }
 502 
 503 void ObjectSynchronizer::waitUninterruptibly(Handle obj, jlong millis, TRAPS) {

 504   if (UseBiasedLocking) {
 505     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
 506     assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
 507   }
 508   if (millis < 0) {
 509     TEVENT(wait - throw IAX);
 510     THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
 511   }
 512   ObjectSynchronizer::inflate(THREAD,
 513                               obj(),
 514                               inflate_cause_wait)->wait(millis, false, THREAD);
 515 }
 516 
 517 void ObjectSynchronizer::notify(Handle obj, TRAPS) {

 518   if (UseBiasedLocking) {
 519     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
 520     assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
 521   }
 522 
 523   markOop mark = obj->mark();
 524   if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
 525     return;
 526   }
 527   ObjectSynchronizer::inflate(THREAD,
 528                               obj(),
 529                               inflate_cause_notify)->notify(THREAD);
 530 }
 531 
 532 // NOTE: see comment of notify()
 533 void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {

 534   if (UseBiasedLocking) {
 535     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
 536     assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
 537   }
 538 
 539   markOop mark = obj->mark();
 540   if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
 541     return;
 542   }
 543   ObjectSynchronizer::inflate(THREAD,
 544                               obj(),
 545                               inflate_cause_notify)->notifyAll(THREAD);
 546 }
 547 
 548 // -----------------------------------------------------------------------------
 549 // Hash Code handling
 550 //
 551 // Performance concern:
 552 // OrderAccess::storestore() calls release() which at one time stored 0
 553 // into the global volatile OrderAccess::dummy variable. This store was


 692     // likely make this the default in future releases.
 693     unsigned t = Self->_hashStateX;
 694     t ^= (t << 11);
 695     Self->_hashStateX = Self->_hashStateY;
 696     Self->_hashStateY = Self->_hashStateZ;
 697     Self->_hashStateZ = Self->_hashStateW;
 698     unsigned v = Self->_hashStateW;
 699     v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));
 700     Self->_hashStateW = v;
 701     value = v;
 702   }
 703 
 704   value &= markOopDesc::hash_mask;
 705   if (value == 0) value = 0xBAD;
 706   assert(value != markOopDesc::no_hash, "invariant");
 707   TEVENT(hashCode: GENERATE);
 708   return value;
 709 }
 710 
 711 intptr_t ObjectSynchronizer::FastHashCode(Thread * Self, oop obj) {








 712   if (UseBiasedLocking) {
 713     // NOTE: many places throughout the JVM do not expect a safepoint
 714     // to be taken here, in particular most operations on perm gen
 715     // objects. However, we only ever bias Java instances and all of
 716     // the call sites of identity_hash that might revoke biases have
 717     // been checked to make sure they can handle a safepoint. The
 718     // added check of the bias pattern is to avoid useless calls to
 719     // thread-local storage.
 720     if (obj->mark()->has_bias_pattern()) {
 721       // Handle for oop obj in case of STW safepoint
 722       Handle hobj(Self, obj);
 723       // Relaxing assertion for bug 6320749.
 724       assert(Universe::verify_in_progress() ||
 725              !SafepointSynchronize::is_at_safepoint(),
 726              "biases should not be seen by VM thread here");
 727       BiasedLocking::revoke_and_rebias(hobj, false, JavaThread::current());
 728       obj = hobj();
 729       assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
 730     }
 731   }


 796   assert(mark->is_neutral(), "invariant");
 797   hash = mark->hash();
 798   if (hash == 0) {
 799     hash = get_next_hash(Self, obj);
 800     temp = mark->copy_set_hash(hash); // merge hash code into header
 801     assert(temp->is_neutral(), "invariant");
 802     test = Atomic::cmpxchg(temp, monitor->header_addr(), mark);
 803     if (test != mark) {
 804       // The only update to the header in the monitor (outside GC)
 805       // is install the hash code. If someone add new usage of
 806       // displaced header, please update this code
 807       hash = test->hash();
 808       assert(test->is_neutral(), "invariant");
 809       assert(hash != 0, "Trivial unexpected object/monitor header usage.");
 810     }
 811   }
 812   // We finally get the hash
 813   return hash;
 814 }
 815 
 816 // Deprecated -- use FastHashCode() instead.
 817 
 818 intptr_t ObjectSynchronizer::identity_hash_value_for(Handle obj) {
 819   return FastHashCode(Thread::current(), obj());
 820 }
 821 
 822 
 823 bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* thread,
 824                                                    Handle h_obj) {
 825   if (UseBiasedLocking) {
 826     BiasedLocking::revoke_and_rebias(h_obj, false, thread);
 827     assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now");
 828   }
 829 
 830   assert(thread == JavaThread::current(), "Can only be called on current thread");
 831   oop obj = h_obj();
 832 
 833   markOop mark = ReadStableMark(obj);
 834 
 835   // Uncontended case, header points to stack
 836   if (mark->has_locker()) {
 837     return thread->is_lock_owned((address)mark->locker());
 838   }
 839   // Contended case, header points to ObjectMonitor (tagged pointer)
 840   if (mark->has_monitor()) {
 841     ObjectMonitor* monitor = mark->monitor();


1365 ObjectMonitor* ObjectSynchronizer::inflate_helper(oop obj) {
1366   markOop mark = obj->mark();
1367   if (mark->has_monitor()) {
1368     assert(ObjectSynchronizer::verify_objmon_isinpool(mark->monitor()), "monitor is invalid");
1369     assert(mark->monitor()->header()->is_neutral(), "monitor must record a good object header");
1370     return mark->monitor();
1371   }
1372   return ObjectSynchronizer::inflate(Thread::current(),
1373                                      obj,
1374                                      inflate_cause_vm_internal);
1375 }
1376 
1377 ObjectMonitor* ObjectSynchronizer::inflate(Thread * Self,
1378                                                      oop object,
1379                                                      const InflateCause cause) {
1380 
1381   // Inflate mutates the heap ...
1382   // Relaxing assertion for bug 6320749.
1383   assert(Universe::verify_in_progress() ||
1384          !SafepointSynchronize::is_at_safepoint(), "invariant");




1385 
1386   EventJavaMonitorInflate event;
1387 
1388   for (;;) {
1389     const markOop mark = object->mark();
1390     assert(!mark->has_bias_pattern(), "invariant");
1391 
1392     // The mark can be in one of the following states:
1393     // *  Inflated     - just return
1394     // *  Stack-locked - coerce it to inflated
1395     // *  INFLATING    - busy wait for conversion to complete
1396     // *  Neutral      - aggressively inflate the object.
1397     // *  BIASED       - Illegal.  We should never see this
1398 
1399     // CASE: inflated
1400     if (mark->has_monitor()) {
1401       ObjectMonitor * inf = mark->monitor();
1402       assert(inf->header()->is_neutral(), "invariant");
1403       assert(inf->object() == object, "invariant");
1404       assert(ObjectSynchronizer::verify_objmon_isinpool(inf), "monitor is invalid");




// Per-slot spin locks; presumably used to serialize racing inflation
// attempts — usage is not visible in this chunk, confirm against the
// inflate() implementation.
static volatile intptr_t gInflationLocks[NINFLATIONLOCKS];

// global list of blocks of monitors
PaddedEnd<ObjectMonitor> * volatile ObjectSynchronizer::gBlockList = NULL;
// global monitor free list
ObjectMonitor * volatile ObjectSynchronizer::gFreeList  = NULL;
// global monitor in-use list, for moribund threads,
// monitors they inflated need to be scanned for deflation
ObjectMonitor * volatile ObjectSynchronizer::gOmInUseList  = NULL;
// count of entries in gOmInUseList
int ObjectSynchronizer::gOmInUseCount = 0;

static volatile intptr_t gListLock = 0;      // protects global monitor lists
static volatile int gMonitorFreeCount  = 0;  // # on gFreeList
static volatile int gMonitorPopulation = 0;  // # Extant -- in circulation

// Forward declaration: posts the JFR JavaMonitorInflate event for a
// completed inflation (definition appears later in this file).
static void post_monitor_inflate_event(EventJavaMonitorInflate&,
                                       const oop,
                                       const ObjectSynchronizer::InflateCause);
 130 
 131 #define CHECK_THROW_VALUE_TYPE_IMSE(obj)  \
 132   if ((obj)->klass_is_value_type()) {     \
 133     ResourceMark rm(THREAD);              \
 134     THROW_MSG(vmSymbols::java_lang_IllegalMonitorStateException(), obj->klass()->external_name()); \
 135   }
 136 
 137 #define CHECK_THROW_VALUE_TYPE_IMSE_0(obj)  \
 138   if ((obj)->klass_is_value_type()) {     \
 139     ResourceMark rm(THREAD);              \
 140     THROW_MSG_0(vmSymbols::java_lang_IllegalMonitorStateException(), obj->klass()->external_name()); \
 141   }
 142 
 143 
// Sentinel oop value (all bits set); presumably marks the end/link of a
// chain in the monitor block list — usage not visible in this chunk.
#define CHAINMARKER (cast_to_oop<intptr_t>(-1))
 145 
 146 
 147 // =====================> Quick functions
 148 
 149 // The quick_* forms are special fast-path variants used to improve
 150 // performance.  In the simplest case, a "quick_*" implementation could
 151 // simply return false, in which case the caller will perform the necessary
 152 // state transitions and call the slow-path form.
 153 // The fast-path is designed to handle frequently arising cases in an efficient
 154 // manner and is just a degenerate "optimistic" variant of the slow-path.
 155 // returns true  -- to indicate the call was satisfied.
 156 // returns false -- to indicate the call needs the services of the slow-path.
 157 // A no-loitering ordinance is in effect for code in the quick_* family
 158 // operators: safepoints or indefinite blocking (blocking that might span a
 159 // safepoint) are forbidden. Generally the thread_state() is _in_Java upon
 160 // entry.
 161 //
 162 // Consider: An interesting optimization is to have the JIT recognize the
 163 // following common idiom:
 164 //   synchronized (someobj) { .... ; notify(); }
 165 // That is, we find a notify() or notifyAll() call that immediately precedes
 166 // the monitorexit operation.  In that case the JIT could fuse the operations
 167 // into a single notifyAndExit() runtime primitive.
 168 
 169 bool ObjectSynchronizer::quick_notify(oopDesc * obj, Thread * self, bool all) {
 170   assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
 171   assert(self->is_Java_thread(), "invariant");
 172   assert(((JavaThread *) self)->thread_state() == _thread_in_Java, "invariant");
 173   NoSafepointVerifier nsv;
 174   if (obj == NULL) return false;  // slow-path for invalid obj
 175   assert(!obj->klass_is_value_type(), "monitor op on value type");
 176   const markOop mark = obj->mark();
 177 
 178   if (mark->has_locker() && self->is_lock_owned((address)mark->locker())) {
 179     // Degenerate notify
 180     // stack-locked by caller so by definition the implied waitset is empty.
 181     return true;
 182   }
 183 
 184   if (mark->has_monitor()) {
 185     ObjectMonitor * const mon = mark->monitor();
 186     assert(mon->object() == obj, "invariant");
 187     if (mon->owner() != self) return false;  // slow-path for IMS exception
 188 
 189     if (mon->first_waiter() != NULL) {
 190       // We have one or more waiters. Since this is an inflated monitor
 191       // that we own, we can transfer one or more threads from the waitset
 192       // to the entrylist here and now, avoiding the slow-path.
 193       if (all) {
 194         DTRACE_MONITOR_PROBE(notifyAll, mon, obj, self);
 195       } else {


 206   }
 207 
 208   // biased locking and any other IMS exception states take the slow-path
 209   return false;
 210 }
 211 
 212 
 213 // The LockNode emitted directly at the synchronization site would have
 214 // been too big if it were to have included support for the cases of inflated
 215 // recursive enter and exit, so they go here instead.
 216 // Note that we can't safely call AsyncPrintJavaStack() from within
 217 // quick_enter() as our thread state remains _in_Java.
 218 
 219 bool ObjectSynchronizer::quick_enter(oop obj, Thread * Self,
 220                                      BasicLock * lock) {
 221   assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
 222   assert(Self->is_Java_thread(), "invariant");
 223   assert(((JavaThread *) Self)->thread_state() == _thread_in_Java, "invariant");
 224   NoSafepointVerifier nsv;
 225   if (obj == NULL) return false;       // Need to throw NPE
 226   assert(!obj->klass_is_value_type(), "monitor op on value type");
 227   const markOop mark = obj->mark();
 228 
 229   if (mark->has_monitor()) {
 230     ObjectMonitor * const m = mark->monitor();
 231     assert(m->object() == obj, "invariant");
 232     Thread * const owner = (Thread *) m->_owner;
 233 
 234     // Lock contention and Transactional Lock Elision (TLE) diagnostics
 235     // and observability
 236     // Case: light contention possibly amenable to TLE
 237     // Case: TLE inimical operations such as nested/recursive synchronization
 238 
 239     if (owner == Self) {
 240       m->_recursions++;
 241       return true;
 242     }
 243 
 244     // This Java Monitor is inflated so obj's header will never be
 245     // displaced to this thread's BasicLock. Make the displaced header
 246     // non-NULL so this BasicLock is not seen as recursive nor as


 262 
 263   // Note that we could inflate in quick_enter.
 264   // This is likely a useful optimization
 265   // Critically, in quick_enter() we must not:
 266   // -- perform bias revocation, or
 267   // -- block indefinitely, or
 268   // -- reach a safepoint
 269 
 270   return false;        // revert to slow-path
 271 }
 272 
 273 // -----------------------------------------------------------------------------
 274 //  Fast Monitor Enter/Exit
 275 // This the fast monitor enter. The interpreter and compiler use
 276 // some assembly copies of this code. Make sure update those code
 277 // if the following function is changed. The implementation is
 278 // extremely sensitive to race condition. Be careful.
 279 
// Fast-path monitor enter. Value types may never be used as monitors, then
// biased-locking resolution is attempted; if the bias is (re)acquired we
// are done, otherwise control falls through to slow_enter(). Kept in sync
// with the assembly copies used by the interpreter and compilers.
void ObjectSynchronizer::fast_enter(Handle obj, BasicLock* lock,
                                    bool attempt_rebias, TRAPS) {
  // Value types have no identity and cannot be locked on; callers are
  // expected to have filtered them out already.
  assert(!obj->klass_is_value_type(), "monitor op on value type");
  if (UseBiasedLocking) {
    if (!SafepointSynchronize::is_at_safepoint()) {
      // Java-thread path: revocation may itself block and reach a safepoint.
      BiasedLocking::Condition cond = BiasedLocking::revoke_and_rebias(obj, attempt_rebias, THREAD);
      if (cond == BiasedLocking::BIAS_REVOKED_AND_REBIASED) {
        // Lock acquired by rebiasing; no displaced header is needed.
        return;
      }
    } else {
      // At-safepoint path (VM thread): may only revoke, never rebias.
      assert(!attempt_rebias, "can not rebias toward VM thread");
      BiasedLocking::revoke_at_safepoint(obj);
    }
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  slow_enter(obj, lock, THREAD);
}
 298 
 299 void ObjectSynchronizer::fast_exit(oop object, BasicLock* lock, TRAPS) {
 300   markOop mark = object->mark();
 301   assert(!object->klass_is_value_type(), "monitor op on value type");
 302   // We cannot check for Biased Locking if we are racing an inflation.
 303   assert(mark == markOopDesc::INFLATING() ||
 304          !mark->has_bias_pattern(), "should not see bias pattern here");
 305 
 306   markOop dhw = lock->displaced_header();
 307   if (dhw == NULL) {
 308     // If the displaced header is NULL, then this exit matches up with
 309     // a recursive enter. No real work to do here except for diagnostics.
 310 #ifndef PRODUCT
 311     if (mark != markOopDesc::INFLATING()) {
 312       // Only do diagnostics if we are not racing an inflation. Simply
 313       // exiting a recursive enter of a Java Monitor that is being
 314       // inflated is safe; see the has_monitor() comment below.
 315       assert(!mark->is_neutral(), "invariant");
 316       assert(!mark->has_locker() ||
 317              THREAD->is_lock_owned((address)mark->locker()), "invariant");
 318       if (mark->has_monitor()) {
 319         // The BasicLock's displaced_header is marked as a recursive
 320         // enter and we have an inflated Java Monitor (ObjectMonitor).
 321         // This is a special case where the Java Monitor was inflated


 338     // swing the displaced header from the BasicLock back to the mark.
 339     assert(dhw->is_neutral(), "invariant");
 340     if (object->cas_set_mark(dhw, mark) == mark) {
 341       TEVENT(fast_exit: release stack-lock);
 342       return;
 343     }
 344   }
 345 
 346   // We have to take the slow-path of possible inflation and then exit.
 347   ObjectSynchronizer::inflate(THREAD,
 348                               object,
 349                               inflate_cause_vm_internal)->exit(true, THREAD);
 350 }
 351 
 352 // -----------------------------------------------------------------------------
 353 // Interpreter/Compiler Slow Case
 354 // This routine is used to handle interpreter/compiler slow case
// We don't need to use the fast path here, because it must have
// failed in the interpreter/compiler code.
 357 void ObjectSynchronizer::slow_enter(Handle obj, BasicLock* lock, TRAPS) {
 358   CHECK_THROW_VALUE_TYPE_IMSE(obj);
 359   markOop mark = obj->mark();
 360   assert(!mark->has_bias_pattern(), "should not see bias pattern here");
 361 
 362   if (mark->is_neutral()) {
 363     // Anticipate successful CAS -- the ST of the displaced mark must
 364     // be visible <= the ST performed by the CAS.
 365     lock->set_displaced_header(mark);
 366     if (mark == obj()->cas_set_mark((markOop) lock, mark)) {
 367       TEVENT(slow_enter: release stacklock);
 368       return;
 369     }
 370     // Fall through to inflate() ...
 371   } else if (mark->has_locker() &&
 372              THREAD->is_lock_owned((address)mark->locker())) {
 373     assert(lock != mark->locker(), "must not re-lock the same lock");
 374     assert(lock != (BasicLock*)obj->mark(), "don't relock with same BasicLock");
 375     lock->set_displaced_header(NULL);
 376     return;
 377   }
 378 


// failed in the interpreter/compiler code. Simply using the heavy
// weight monitor should be ok, unless someone finds otherwise.
// Slow-path exit used by the interpreter/compiler; simply delegates to
// fast_exit(), which handles the recursive, stack-locked and inflated cases.
void ObjectSynchronizer::slow_exit(oop object, BasicLock* lock, TRAPS) {
  fast_exit(object, lock, THREAD);
}
 396 
 397 // -----------------------------------------------------------------------------
 398 // Class Loader  support to workaround deadlocks on the class loader lock objects
 399 // Also used by GC
 400 // complete_exit()/reenter() are used to wait on a nested lock
 401 // i.e. to give up an outer lock completely and then re-enter
 402 // Used when holding nested locks - lock acquisition order: lock1 then lock2
 403 //  1) complete_exit lock1 - saving recursion count
 404 //  2) wait on lock2
 405 //  3) when notified on lock2, unlock lock2
 406 //  4) reenter lock1 with original recursion count
 407 //  5) lock lock2
 408 // NOTE: must use heavy weight monitor to handle complete_exit/reenter()
 409 intptr_t ObjectSynchronizer::complete_exit(Handle obj, TRAPS) {
 410   TEVENT(complete_exit);
 411   assert(!obj->klass_is_value_type(), "monitor op on value type");
 412   if (UseBiasedLocking) {
 413     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
 414     assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
 415   }
 416 
 417   ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD,
 418                                                        obj(),
 419                                                        inflate_cause_vm_internal);
 420 
 421   return monitor->complete_exit(THREAD);
 422 }
 423 
 424 // NOTE: must use heavy weight monitor to handle complete_exit/reenter()
 425 void ObjectSynchronizer::reenter(Handle obj, intptr_t recursion, TRAPS) {
 426   TEVENT(reenter);
 427   assert(!obj->klass_is_value_type(), "monitor op on value type");
 428   if (UseBiasedLocking) {
 429     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
 430     assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
 431   }
 432 
 433   ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD,
 434                                                        obj(),
 435                                                        inflate_cause_vm_internal);
 436 
 437   monitor->reenter(recursion, THREAD);
 438 }
 439 // -----------------------------------------------------------------------------
 440 // JNI locks on java objects
 441 // NOTE: must use heavy weight monitor to handle jni monitor enter
 442 void ObjectSynchronizer::jni_enter(Handle obj, TRAPS) {
 443   // the current locking is from JNI instead of Java code
 444   TEVENT(jni_enter);
 445   CHECK_THROW_VALUE_TYPE_IMSE(obj);
 446   if (UseBiasedLocking) {
 447     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
 448     assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
 449   }
 450   THREAD->set_current_pending_monitor_is_from_java(false);
 451   ObjectSynchronizer::inflate(THREAD, obj(), inflate_cause_jni_enter)->enter(THREAD);
 452   THREAD->set_current_pending_monitor_is_from_java(true);
 453 }
 454 
 455 // NOTE: must use heavy weight monitor to handle jni monitor exit
 456 void ObjectSynchronizer::jni_exit(oop obj, Thread* THREAD) {
 457   TEVENT(jni_exit);
 458   CHECK_THROW_VALUE_TYPE_IMSE(obj);
 459   if (UseBiasedLocking) {
 460     Handle h_obj(THREAD, obj);
 461     BiasedLocking::revoke_and_rebias(h_obj, false, THREAD);
 462     obj = h_obj();
 463   }
 464   assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
 465 
 466   ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD,
 467                                                        obj,
 468                                                        inflate_cause_jni_exit);
 469   // If this thread has locked the object, exit the monitor.  Note:  can't use
 470   // monitor->check(CHECK); must exit even if an exception is pending.
 471   if (monitor->check(THREAD)) {
 472     monitor->exit(true, THREAD);
 473   }
 474 }
 475 
 476 // -----------------------------------------------------------------------------
 477 // Internal VM locks on java objects
 478 // standard constructor, allows locking failures


 483   _obj = obj;
 484 
 485   if (_dolock) {
 486     TEVENT(ObjectLocker);
 487 
 488     ObjectSynchronizer::fast_enter(_obj, &_lock, false, _thread);
 489   }
 490 }
 491 
ObjectLocker::~ObjectLocker() {
  // Release the lock taken in the constructor, but only if it was taken.
  if (_dolock) {
    ObjectSynchronizer::fast_exit(_obj(), &_lock, _thread);
  }
}
 497 
 498 
 499 // -----------------------------------------------------------------------------
 500 //  Wait/Notify/NotifyAll
 501 // NOTE: must use heavy weight monitor to handle wait()
 502 int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
 503   CHECK_THROW_VALUE_TYPE_IMSE_0(obj);
 504   if (UseBiasedLocking) {
 505     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
 506     assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
 507   }
 508   if (millis < 0) {
 509     TEVENT(wait - throw IAX);
 510     THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
 511   }
 512   ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD,
 513                                                        obj(),
 514                                                        inflate_cause_wait);
 515 
 516   DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), THREAD, millis);
 517   monitor->wait(millis, true, THREAD);
 518 
 519   // This dummy call is in place to get around dtrace bug 6254741.  Once
 520   // that's fixed we can uncomment the following line, remove the call
 521   // and change this function back into a "void" func.
 522   // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
 523   return dtrace_waited_probe(monitor, obj, THREAD);
 524 }
 525 
 526 void ObjectSynchronizer::waitUninterruptibly(Handle obj, jlong millis, TRAPS) {
 527   CHECK_THROW_VALUE_TYPE_IMSE(obj);
 528   if (UseBiasedLocking) {
 529     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
 530     assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
 531   }
 532   if (millis < 0) {
 533     TEVENT(wait - throw IAX);
 534     THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
 535   }
 536   ObjectSynchronizer::inflate(THREAD,
 537                               obj(),
 538                               inflate_cause_wait)->wait(millis, false, THREAD);
 539 }
 540 
 541 void ObjectSynchronizer::notify(Handle obj, TRAPS) {
 542   CHECK_THROW_VALUE_TYPE_IMSE(obj);
 543   if (UseBiasedLocking) {
 544     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
 545     assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
 546   }
 547 
 548   markOop mark = obj->mark();
 549   if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
 550     return;
 551   }
 552   ObjectSynchronizer::inflate(THREAD,
 553                               obj(),
 554                               inflate_cause_notify)->notify(THREAD);
 555 }
 556 
 557 // NOTE: see comment of notify()
 558 void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
 559   CHECK_THROW_VALUE_TYPE_IMSE(obj);
 560   if (UseBiasedLocking) {
 561     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
 562     assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
 563   }
 564 
 565   markOop mark = obj->mark();
 566   if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
 567     return;
 568   }
 569   ObjectSynchronizer::inflate(THREAD,
 570                               obj(),
 571                               inflate_cause_notify)->notifyAll(THREAD);
 572 }
 573 
 574 // -----------------------------------------------------------------------------
 575 // Hash Code handling
 576 //
 577 // Performance concern:
 578 // OrderAccess::storestore() calls release() which at one time stored 0
 579 // into the global volatile OrderAccess::dummy variable. This store was


 718     // likely make this the default in future releases.
 719     unsigned t = Self->_hashStateX;
 720     t ^= (t << 11);
 721     Self->_hashStateX = Self->_hashStateY;
 722     Self->_hashStateY = Self->_hashStateZ;
 723     Self->_hashStateZ = Self->_hashStateW;
 724     unsigned v = Self->_hashStateW;
 725     v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));
 726     Self->_hashStateW = v;
 727     value = v;
 728   }
 729 
 730   value &= markOopDesc::hash_mask;
 731   if (value == 0) value = 0xBAD;
 732   assert(value != markOopDesc::no_hash, "invariant");
 733   TEVENT(hashCode: GENERATE);
 734   return value;
 735 }
 736 
 737 intptr_t ObjectSynchronizer::FastHashCode(Thread * Self, oop obj) {
 738   if (EnableValhalla && obj->klass_is_value_type()) {
 739     // Expected tooling to override hashCode for value type, just don't crash
 740     if (log_is_enabled(Debug, monitorinflation)) {
 741       ResourceMark rm;
 742       log_debug(monitorinflation)("FastHashCode for value type: %s", obj->klass()->external_name());
 743     }
 744     return 0;
 745   }
 746   if (UseBiasedLocking) {
 747     // NOTE: many places throughout the JVM do not expect a safepoint
 748     // to be taken here, in particular most operations on perm gen
 749     // objects. However, we only ever bias Java instances and all of
 750     // the call sites of identity_hash that might revoke biases have
 751     // been checked to make sure they can handle a safepoint. The
 752     // added check of the bias pattern is to avoid useless calls to
 753     // thread-local storage.
 754     if (obj->mark()->has_bias_pattern()) {
 755       // Handle for oop obj in case of STW safepoint
 756       Handle hobj(Self, obj);
 757       // Relaxing assertion for bug 6320749.
 758       assert(Universe::verify_in_progress() ||
 759              !SafepointSynchronize::is_at_safepoint(),
 760              "biases should not be seen by VM thread here");
 761       BiasedLocking::revoke_and_rebias(hobj, false, JavaThread::current());
 762       obj = hobj();
 763       assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
 764     }
 765   }


 830   assert(mark->is_neutral(), "invariant");
 831   hash = mark->hash();
 832   if (hash == 0) {
 833     hash = get_next_hash(Self, obj);
 834     temp = mark->copy_set_hash(hash); // merge hash code into header
 835     assert(temp->is_neutral(), "invariant");
 836     test = Atomic::cmpxchg(temp, monitor->header_addr(), mark);
 837     if (test != mark) {
 838       // The only update to the header in the monitor (outside GC)
 839       // is install the hash code. If someone add new usage of
 840       // displaced header, please update this code
 841       hash = test->hash();
 842       assert(test->is_neutral(), "invariant");
 843       assert(hash != 0, "Trivial unexpected object/monitor header usage.");
 844     }
 845   }
 846   // We finally get the hash
 847   return hash;
 848 }
 849 






 850 
 851 bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* thread,
 852                                                    Handle h_obj) {
 853   if (UseBiasedLocking) {
 854     BiasedLocking::revoke_and_rebias(h_obj, false, thread);
 855     assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now");
 856   }
 857 
 858   assert(thread == JavaThread::current(), "Can only be called on current thread");
 859   oop obj = h_obj();
 860 
 861   markOop mark = ReadStableMark(obj);
 862 
 863   // Uncontended case, header points to stack
 864   if (mark->has_locker()) {
 865     return thread->is_lock_owned((address)mark->locker());
 866   }
 867   // Contended case, header points to ObjectMonitor (tagged pointer)
 868   if (mark->has_monitor()) {
 869     ObjectMonitor* monitor = mark->monitor();


1393 ObjectMonitor* ObjectSynchronizer::inflate_helper(oop obj) {
1394   markOop mark = obj->mark();
1395   if (mark->has_monitor()) {
1396     assert(ObjectSynchronizer::verify_objmon_isinpool(mark->monitor()), "monitor is invalid");
1397     assert(mark->monitor()->header()->is_neutral(), "monitor must record a good object header");
1398     return mark->monitor();
1399   }
1400   return ObjectSynchronizer::inflate(Thread::current(),
1401                                      obj,
1402                                      inflate_cause_vm_internal);
1403 }
1404 
1405 ObjectMonitor* ObjectSynchronizer::inflate(Thread * Self,
1406                                                      oop object,
1407                                                      const InflateCause cause) {
1408 
1409   // Inflate mutates the heap ...
1410   // Relaxing assertion for bug 6320749.
1411   assert(Universe::verify_in_progress() ||
1412          !SafepointSynchronize::is_at_safepoint(), "invariant");
1413 
1414   if (EnableValhalla) {
1415     guarantee(!object->klass_is_value_type(), "Attempt to inflate value type");
1416   }
1417 
1418   EventJavaMonitorInflate event;
1419 
1420   for (;;) {
1421     const markOop mark = object->mark();
1422     assert(!mark->has_bias_pattern(), "invariant");
1423 
1424     // The mark can be in one of the following states:
1425     // *  Inflated     - just return
1426     // *  Stack-locked - coerce it to inflated
1427     // *  INFLATING    - busy wait for conversion to complete
1428     // *  Neutral      - aggressively inflate the object.
1429     // *  BIASED       - Illegal.  We should never see this
1430 
1431     // CASE: inflated
1432     if (mark->has_monitor()) {
1433       ObjectMonitor * inf = mark->monitor();
1434       assert(inf->header()->is_neutral(), "invariant");
1435       assert(inf->object() == object, "invariant");
1436       assert(ObjectSynchronizer::verify_objmon_isinpool(inf), "monitor is invalid");


< prev index next >