src/share/vm/runtime/synchronizer.cpp

rev 13259 : imported patch 8184181.patch

 306         // enter and we have an inflated Java Monitor (ObjectMonitor).
 307         // This is a special case where the Java Monitor was inflated
 308         // after this thread entered the stack-lock recursively. When a
 309         // Java Monitor is inflated, we cannot safely walk the Java
 310         // Monitor owner's stack and update the BasicLocks because a
 311         // Java Monitor can be asynchronously inflated by a thread that
 312         // does not own the Java Monitor.
 313         ObjectMonitor * m = mark->monitor();
 314         assert(((oop)(m->object()))->mark() == mark, "invariant");
 315         assert(m->is_entered(THREAD), "invariant");
 316       }
 317     }
 318 #endif
 319     return;
 320   }
 321 
 322   if (mark == (markOop) lock) {
 323     // If the object is stack-locked by the current thread, try to
 324     // swing the displaced header from the BasicLock back to the mark.
 325     assert(dhw->is_neutral(), "invariant");
 326     if ((markOop) Atomic::cmpxchg_ptr(dhw, object->mark_addr(), mark) == mark) {
 327       TEVENT(fast_exit: release stack-lock);
 328       return;
 329     }
 330   }
 331 
 332   // We have to take the slow-path of possible inflation and then exit.
 333   ObjectSynchronizer::inflate(THREAD,
 334                               object,
 335                               inflate_cause_vm_internal)->exit(true, THREAD);
 336 }
 337 
 338 // -----------------------------------------------------------------------------
 339 // Interpreter/Compiler Slow Case
 340 // This routine is used to handle the interpreter/compiler slow case.
 341 // We don't need to use the fast path here, because it must have
 342 // failed in the interpreter/compiler code.
 343 void ObjectSynchronizer::slow_enter(Handle obj, BasicLock* lock, TRAPS) {
 344   markOop mark = obj->mark();
 345   assert(!mark->has_bias_pattern(), "should not see bias pattern here");
 346 
 347   if (mark->is_neutral()) {
 348     // Anticipate successful CAS -- the ST of the displaced mark must
 349     // be visible <= the ST performed by the CAS.
 350     lock->set_displaced_header(mark);
 351     if (mark == (markOop) Atomic::cmpxchg_ptr(lock, obj()->mark_addr(), mark)) {
 352       TEVENT(slow_enter: release stacklock);
 353       return;
 354     }
 355     // Fall through to inflate() ...
 356   } else if (mark->has_locker() &&
 357              THREAD->is_lock_owned((address)mark->locker())) {
 358     assert(lock != mark->locker(), "must not re-lock the same lock");
 359     assert(lock != (BasicLock*)obj->mark(), "don't relock with same BasicLock");
 360     lock->set_displaced_header(NULL);
 361     return;
 362   }
 363 
 364   // The object header will never be displaced to this lock,
 365   // so it does not matter what the value is, except that it
 366   // must be non-zero to avoid looking like a re-entrant lock,
 367   // and must not look locked either.
 368   lock->set_displaced_header(markOopDesc::unused_mark());
 369   ObjectSynchronizer::inflate(THREAD,
 370                               obj(),
 371                               inflate_cause_monitor_enter)->enter(THREAD);
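slow_enter() stack-locks by saving the current (neutral) header into the on-stack BasicLock and CASing a pointer to that BasicLock into the object's header word; fast_exit() reverses this by CASing the displaced header back. A stand-alone sketch of that displaced-header dance follows; it is only an illustration, with invented names (ToyObject, ToyBasicLock, toy_enter, toy_exit) and std::atomic standing in for HotSpot's markOop and Atomic:: primitives.

    #include <atomic>
    #include <cstdint>

    // Simplified model: a "neutral" header is stored directly in the mark word.
    // While stack-locked, the mark word holds a pointer to the owner's BasicLock
    // and the original header value is parked in the lock's displaced slot.
    struct ToyBasicLock { intptr_t displaced; };
    struct ToyObject    { std::atomic<intptr_t> mark; };

    // Enter: save the current header into the BasicLock, then try to CAS a
    // pointer to that BasicLock into the object's header word.
    bool toy_enter(ToyObject* obj, ToyBasicLock* lock) {
      intptr_t mark = obj->mark.load();
      lock->displaced = mark;                 // anticipate a successful CAS
      return obj->mark.compare_exchange_strong(mark,
                                               reinterpret_cast<intptr_t>(lock));
    }

    // Exit: swing the displaced header from the BasicLock back into the object.
    bool toy_exit(ToyObject* obj, ToyBasicLock* lock) {
      intptr_t expected = reinterpret_cast<intptr_t>(lock);
      return obj->mark.compare_exchange_strong(expected, lock->displaced);
    }

When either CAS fails, the real code falls through to ObjectSynchronizer::inflate(); the recursive case, where the header already points into the current thread's stack, simply records a NULL displaced header instead of performing a CAS.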


 741          Self->is_Java_thread() , "invariant");
 742   assert(Universe::verify_in_progress() || DumpSharedSpaces ||
 743          ((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant");
 744 
 745   ObjectMonitor* monitor = NULL;
 746   markOop temp, test;
 747   intptr_t hash;
 748   markOop mark = ReadStableMark(obj);
 749 
 750   // object should remain ineligible for biased locking
 751   assert(!mark->has_bias_pattern(), "invariant");
 752 
 753   if (mark->is_neutral()) {
 754     hash = mark->hash();              // this is a normal header
 755     if (hash) {                       // if it has hash, just return it
 756       return hash;
 757     }
 758     hash = get_next_hash(Self, obj);  // allocate a new hash code
 759     temp = mark->copy_set_hash(hash); // merge the hash code into header
 760     // use (machine word version) atomic operation to install the hash
 761     test = (markOop) Atomic::cmpxchg_ptr(temp, obj->mark_addr(), mark);
 762     if (test == mark) {
 763       return hash;
 764     }
 765     // If the atomic operation failed, we must inflate the header
 766     // into a heavyweight monitor. We could add more code here
 767     // for the fast path, but it is not worth the complexity.
 768   } else if (mark->has_monitor()) {
 769     monitor = mark->monitor();
 770     temp = monitor->header();
 771     assert(temp->is_neutral(), "invariant");
 772     hash = temp->hash();
 773     if (hash) {
 774       return hash;
 775     }
 776     // Skip to the following code to reduce code size
 777   } else if (Self->is_lock_owned((address)mark->locker())) {
 778     temp = mark->displaced_mark_helper(); // this is a lightweight monitor owned
 779     assert(temp->is_neutral(), "invariant");
 780     hash = temp->hash();              // by current thread, check if the displaced
 781     if (hash) {                       // header contains hash code
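The neutral-header branch above installs a freshly generated identity hash with a single header CAS and gives up on the fast path if that CAS loses a race. A minimal sketch of that install-or-fall-back shape, with an invented header layout (toy_hash_shift, toy_hash_mask) and std::atomic in place of the real markOop accessors:

    #include <atomic>
    #include <cstdint>

    static const int      toy_hash_shift = 8;         // invented layout, for illustration only
    static const intptr_t toy_hash_mask  = 0x7fffff;

    static intptr_t toy_hash_bits(intptr_t mark) {
      return (mark >> toy_hash_shift) & toy_hash_mask;
    }

    // Return the existing hash, or try to install new_hash with one CAS.
    // A zero result means the CAS lost a race and the caller must take the
    // slow (inflation) path, mirroring the structure of the code above.
    intptr_t toy_install_hash(std::atomic<intptr_t>* mark_word, intptr_t new_hash) {
      intptr_t mark = mark_word->load();
      intptr_t hash = toy_hash_bits(mark);
      if (hash != 0) {
        return hash;                                   // already hashed
      }
      intptr_t merged = mark | ((new_hash & toy_hash_mask) << toy_hash_shift);
      if (mark_word->compare_exchange_strong(mark, merged)) {
        return new_hash;                               // hash installed
      }
      return 0;                                        // interference: inflate instead
    }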


1435     //
1436     // We now use per-thread private objectmonitor free lists.
1437     // These lists are reprovisioned from the global free list outside the
1438     // critical INFLATING...ST interval.  A thread can transfer
1439     // multiple objectmonitors en masse from the global free list to its local free list.
1440     // This reduces coherency traffic and lock contention on the global free list.
1441     // Using such local free lists, it doesn't matter if the omAlloc() call appears
1442     // before or after the CAS(INFLATING) operation.
1443     // See the comments in omAlloc().
1444 
1445     if (mark->has_locker()) {
1446       ObjectMonitor * m = omAlloc(Self);
1447       // Optimistically prepare the objectmonitor - anticipate successful CAS
1448       // We do this before the CAS in order to minimize the length of time
1449       // in which INFLATING appears in the mark.
1450       m->Recycle();
1451       m->_Responsible  = NULL;
1452       m->_recursions   = 0;
1453       m->_SpinDuration = ObjectMonitor::Knob_SpinLimit;   // Consider: maintain by type/class
1454 
1455       markOop cmp = (markOop) Atomic::cmpxchg_ptr(markOopDesc::INFLATING(), object->mark_addr(), mark);
1456       if (cmp != mark) {
1457         omRelease(Self, m, true);
1458         continue;       // Interference -- just retry
1459       }
1460 
1461       // We've successfully installed INFLATING (0) into the mark-word.
1462       // This is the only case where 0 will appear in a mark-word.
1463       // Only the singular thread that successfully swings the mark-word
1464       // to 0 can perform (or more precisely, complete) inflation.
1465       //
1466       // Why do we CAS a 0 into the mark-word instead of just CASing the
1467       // mark-word from the stack-locked value directly to the new inflated state?
1468       // Consider what happens when a thread unlocks a stack-locked object.
1469       // It attempts to use CAS to swing the displaced header value from the
1470       // on-stack basiclock back into the object header.  Recall also that the
1471       // header value (hashcode, etc) can reside in (a) the object header, or
1472       // (b) a displaced header associated with the stack-lock, or (c) a displaced
1473       // header in an objectMonitor.  The inflate() routine must copy the header
1474       // value from the basiclock on the owner's stack to the objectMonitor, all
1475       // the while preserving the hashCode stability invariants.  If the owner
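The comment block above motivates the temporary INFLATING (0) value: the inflating thread first claims the header with a CAS to the sentinel, copies the displaced header out of the owner's BasicLock while concurrent readers spin on the 0, and only then publishes the monitor pointer. A simplified model of that claim-copy-publish ordering, with invented names (TOY_INFLATING, ToyMonitor, toy_inflate_stacklocked) and std::atomic rather than HotSpot's primitives:

    #include <atomic>
    #include <cstdint>

    static const intptr_t TOY_INFLATING = 0;    // sentinel: inflation in progress

    struct ToyMonitor { intptr_t header; };     // would also carry owner, recursions, ...

    // Claim the header, copy the displaced header, then publish the monitor.
    // Readers that observe TOY_INFLATING must spin until the final value
    // (the encoded monitor pointer) appears.
    bool toy_inflate_stacklocked(std::atomic<intptr_t>* mark_word,
                                 intptr_t stacklock_mark,     // header: points at owner's BasicLock
                                 intptr_t* displaced_header,  // owner's BasicLock slot
                                 ToyMonitor* m) {
      intptr_t expected = stacklock_mark;
      if (!mark_word->compare_exchange_strong(expected, TOY_INFLATING)) {
        return false;                          // interference: caller retries
      }
      m->header = *displaced_header;           // safe: the unlocker's CAS can no longer succeed
      mark_word->store(reinterpret_cast<intptr_t>(m));  // publish the inflated state
      return true;
    }

The point of the sentinel is that an unlocking thread's CAS, which expects to see the BasicLock pointer in the header, is guaranteed to fail once the 0 is installed, so the displaced header cannot change while it is being copied into the monitor.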


1530     // TODO-FIXME: for entry we currently inflate and then try to CAS _owner.
1531     // If we know we're inflating for entry it's better to inflate by swinging a
1532     // pre-locked objectMonitor pointer into the object header.   A successful
1533     // CAS inflates the object *and* confers ownership to the inflating thread.
1534     // In the current implementation we use a 2-step mechanism where we CAS()
1535     // to inflate and then CAS() again to try to swing _owner from NULL to Self.
1536     // An inflateTry() method that we could call from fast_enter() and slow_enter()
1537     // would be useful.
1538 
1539     assert(mark->is_neutral(), "invariant");
1540     ObjectMonitor * m = omAlloc(Self);
1541     // prepare m for installation - set monitor to initial state
1542     m->Recycle();
1543     m->set_header(mark);
1544     m->set_owner(NULL);
1545     m->set_object(object);
1546     m->_recursions   = 0;
1547     m->_Responsible  = NULL;
1548     m->_SpinDuration = ObjectMonitor::Knob_SpinLimit;       // consider: keep metastats by type/class
1549 
1550     if (Atomic::cmpxchg_ptr (markOopDesc::encode(m), object->mark_addr(), mark) != mark) {
1551       m->set_object(NULL);
1552       m->set_owner(NULL);
1553       m->Recycle();
1554       omRelease(Self, m, true);
1555       m = NULL;
1556       continue;
1557       // interference - the markword changed - just retry.
1558       // The state-transitions are one-way, so there's no chance of
1559       // live-lock -- "Inflated" is an absorbing state.
1560     }
1561 
1562     // Hopefully the performance counters are allocated on distinct
1563     // cache lines to avoid false sharing on MP systems ...
1564     OM_PERFDATA_OP(Inflations, inc());
1565     TEVENT(Inflate: overwrite neutral);
1566     if (log_is_enabled(Debug, monitorinflation)) {
1567       if (object->is_instance()) {
1568         ResourceMark rm;
1569         log_debug(monitorinflation)("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
1570                                     p2i(object), p2i(object->mark()),
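For a neutral header there is no displaced value to rescue, so inflation collapses to preparing the monitor and performing a single CAS of the encoded pointer, retried from the top of the loop on interference. A compressed sketch of that loop shape (invented names; a real retry would re-examine the header, which may by then already be inflated by another thread):

    #include <atomic>
    #include <cstdint>

    struct ToyMonitor { intptr_t header; void* object; };

    ToyMonitor* toy_inflate_neutral(std::atomic<intptr_t>* mark_word, void* obj) {
      for (;;) {
        intptr_t mark = mark_word->load();
        ToyMonitor* m = new ToyMonitor();      // stands in for omAlloc(Self)
        m->header = mark;                      // preserve hash/age bits from the neutral header
        m->object = obj;
        intptr_t expected = mark;
        if (mark_word->compare_exchange_strong(expected,
                                               reinterpret_cast<intptr_t>(m))) {
          return m;                            // "Inflated" is an absorbing state
        }
        delete m;                              // stands in for omRelease(); retry
      }
    }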




 306         // enter and we have an inflated Java Monitor (ObjectMonitor).
 307         // This is a special case where the Java Monitor was inflated
 308         // after this thread entered the stack-lock recursively. When a
 309         // Java Monitor is inflated, we cannot safely walk the Java
 310         // Monitor owner's stack and update the BasicLocks because a
 311         // Java Monitor can be asynchronously inflated by a thread that
 312         // does not own the Java Monitor.
 313         ObjectMonitor * m = mark->monitor();
 314         assert(((oop)(m->object()))->mark() == mark, "invariant");
 315         assert(m->is_entered(THREAD), "invariant");
 316       }
 317     }
 318 #endif
 319     return;
 320   }
 321 
 322   if (mark == (markOop) lock) {
 323     // If the object is stack-locked by the current thread, try to
 324     // swing the displaced header from the BasicLock back to the mark.
 325     assert(dhw->is_neutral(), "invariant");
 326     if (object->cas_set_mark(dhw, mark) == mark) {
 327       TEVENT(fast_exit: release stack-lock);
 328       return;
 329     }
 330   }
 331 
 332   // We have to take the slow-path of possible inflation and then exit.
 333   ObjectSynchronizer::inflate(THREAD,
 334                               object,
 335                               inflate_cause_vm_internal)->exit(true, THREAD);
 336 }
 337 
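The difference visible in this version of the file is the call-site cleanup from 8184181: the raw Atomic::cmpxchg_ptr() calls on object->mark_addr() are replaced by an oopDesc::cas_set_mark(new_mark, old_mark) helper. The helper's definition is not part of this excerpt; presumably it is a thin wrapper over the same header-word CAS, along the lines of the following sketch (its exact signature and home in oop.inline.hpp are assumptions):

    // Presumed shape of the helper used above -- not taken from this patch.
    inline markOop oopDesc::cas_set_mark(markOop new_mark, markOop old_mark) {
      // CAS the header word and return the witnessed value; callers compare the
      // result against old_mark to detect interference, exactly as before.
      return (markOop) Atomic::cmpxchg_ptr(new_mark, &_mark, old_mark);
    }

Either way, the calling pattern in synchronizer.cpp is unchanged: the returned witness is compared against the expected mark to decide whether the exchange succeeded.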
 338 // -----------------------------------------------------------------------------
 339 // Interpreter/Compiler Slow Case
 340 // This routine is used to handle the interpreter/compiler slow case.
 341 // We don't need to use the fast path here, because it must have
 342 // failed in the interpreter/compiler code.
 343 void ObjectSynchronizer::slow_enter(Handle obj, BasicLock* lock, TRAPS) {
 344   markOop mark = obj->mark();
 345   assert(!mark->has_bias_pattern(), "should not see bias pattern here");
 346 
 347   if (mark->is_neutral()) {
 348     // Anticipate successful CAS -- the ST of the displaced mark must
 349     // be visible <= the ST performed by the CAS.
 350     lock->set_displaced_header(mark);
 351     if (mark == obj()->cas_set_mark((markOop) lock, mark)) {
 352       TEVENT(slow_enter: release stacklock);
 353       return;
 354     }
 355     // Fall through to inflate() ...
 356   } else if (mark->has_locker() &&
 357              THREAD->is_lock_owned((address)mark->locker())) {
 358     assert(lock != mark->locker(), "must not re-lock the same lock");
 359     assert(lock != (BasicLock*)obj->mark(), "don't relock with same BasicLock");
 360     lock->set_displaced_header(NULL);
 361     return;
 362   }
 363 
 364   // The object header will never be displaced to this lock,
 365   // so it does not matter what the value is, except that it
 366   // must be non-zero to avoid looking like a re-entrant lock,
 367   // and must not look locked either.
 368   lock->set_displaced_header(markOopDesc::unused_mark());
 369   ObjectSynchronizer::inflate(THREAD,
 370                               obj(),
 371                               inflate_cause_monitor_enter)->enter(THREAD);


 741          Self->is_Java_thread() , "invariant");
 742   assert(Universe::verify_in_progress() || DumpSharedSpaces ||
 743          ((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant");
 744 
 745   ObjectMonitor* monitor = NULL;
 746   markOop temp, test;
 747   intptr_t hash;
 748   markOop mark = ReadStableMark(obj);
 749 
 750   // object should remain ineligible for biased locking
 751   assert(!mark->has_bias_pattern(), "invariant");
 752 
 753   if (mark->is_neutral()) {
 754     hash = mark->hash();              // this is a normal header
 755     if (hash) {                       // if it has hash, just return it
 756       return hash;
 757     }
 758     hash = get_next_hash(Self, obj);  // allocate a new hash code
 759     temp = mark->copy_set_hash(hash); // merge the hash code into header
 760     // use (machine word version) atomic operation to install the hash
 761     test = obj->cas_set_mark(temp, mark);
 762     if (test == mark) {
 763       return hash;
 764     }
 765     // If the atomic operation failed, we must inflate the header
 766     // into a heavyweight monitor. We could add more code here
 767     // for the fast path, but it is not worth the complexity.
 768   } else if (mark->has_monitor()) {
 769     monitor = mark->monitor();
 770     temp = monitor->header();
 771     assert(temp->is_neutral(), "invariant");
 772     hash = temp->hash();
 773     if (hash) {
 774       return hash;
 775     }
 776     // Skip to the following code to reduce code size
 777   } else if (Self->is_lock_owned((address)mark->locker())) {
 778     temp = mark->displaced_mark_helper(); // this is a lightweight monitor owned
 779     assert(temp->is_neutral(), "invariant");
 780     hash = temp->hash();              // by current thread, check if the displaced
 781     if (hash) {                       // header contains hash code
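This copy of the hash-code path is unchanged apart from the CAS helper; it still begins with ReadStableMark(obj), which refuses to return a header that currently holds the INFLATING sentinel. A toy equivalent of that read loop (toy_read_stable_mark is an invented name; the real routine also backs off and may park rather than pure-spinning):

    #include <atomic>
    #include <cstdint>

    static const intptr_t TOY_INFLATING = 0;

    // Spin until the header holds something other than the inflation sentinel,
    // so callers never treat the transient 0 as a real mark value.
    intptr_t toy_read_stable_mark(const std::atomic<intptr_t>* mark_word) {
      for (;;) {
        intptr_t mark = mark_word->load();
        if (mark != TOY_INFLATING) {
          return mark;
        }
        // HotSpot's ReadStableMark spins with backoff and may park here.
      }
    }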


1435     //
1436     // We now use per-thread private objectmonitor free lists.
1437     // These lists are reprovisioned from the global free list outside the
1438     // critical INFLATING...ST interval.  A thread can transfer
1439     // multiple objectmonitors en masse from the global free list to its local free list.
1440     // This reduces coherency traffic and lock contention on the global free list.
1441     // Using such local free lists, it doesn't matter if the omAlloc() call appears
1442     // before or after the CAS(INFLATING) operation.
1443     // See the comments in omAlloc().
1444 
1445     if (mark->has_locker()) {
1446       ObjectMonitor * m = omAlloc(Self);
1447       // Optimistically prepare the objectmonitor - anticipate successful CAS
1448       // We do this before the CAS in order to minimize the length of time
1449       // in which INFLATING appears in the mark.
1450       m->Recycle();
1451       m->_Responsible  = NULL;
1452       m->_recursions   = 0;
1453       m->_SpinDuration = ObjectMonitor::Knob_SpinLimit;   // Consider: maintain by type/class
1454 
1455       markOop cmp = object->cas_set_mark(markOopDesc::INFLATING(), mark);
1456       if (cmp != mark) {
1457         omRelease(Self, m, true);
1458         continue;       // Interference -- just retry
1459       }
1460 
1461       // We've successfully installed INFLATING (0) into the mark-word.
1462       // This is the only case where 0 will appear in a mark-word.
1463       // Only the singular thread that successfully swings the mark-word
1464       // to 0 can perform (or more precisely, complete) inflation.
1465       //
1466       // Why do we CAS a 0 into the mark-word instead of just CASing the
1467       // mark-word from the stack-locked value directly to the new inflated state?
1468       // Consider what happens when a thread unlocks a stack-locked object.
1469       // It attempts to use CAS to swing the displaced header value from the
1470       // on-stack basiclock back into the object header.  Recall also that the
1471       // header value (hashcode, etc) can reside in (a) the object header, or
1472       // (b) a displaced header associated with the stack-lock, or (c) a displaced
1473       // header in an objectMonitor.  The inflate() routine must copy the header
1474       // value from the basiclock on the owner's stack to the objectMonitor, all
1475       // the while preserving the hashCode stability invariants.  If the owner


1530     // TODO-FIXME: for entry we currently inflate and then try to CAS _owner.
1531     // If we know we're inflating for entry it's better to inflate by swinging a
1532     // pre-locked objectMonitor pointer into the object header.   A successful
1533     // CAS inflates the object *and* confers ownership to the inflating thread.
1534     // In the current implementation we use a 2-step mechanism where we CAS()
1535     // to inflate and then CAS() again to try to swing _owner from NULL to Self.
1536     // An inflateTry() method that we could call from fast_enter() and slow_enter()
1537     // would be useful.
1538 
1539     assert(mark->is_neutral(), "invariant");
1540     ObjectMonitor * m = omAlloc(Self);
1541     // prepare m for installation - set monitor to initial state
1542     m->Recycle();
1543     m->set_header(mark);
1544     m->set_owner(NULL);
1545     m->set_object(object);
1546     m->_recursions   = 0;
1547     m->_Responsible  = NULL;
1548     m->_SpinDuration = ObjectMonitor::Knob_SpinLimit;       // consider: keep metastats by type/class
1549 
1550     if (object->cas_set_mark(markOopDesc::encode(m), mark) != mark) {
1551       m->set_object(NULL);
1552       m->set_owner(NULL);
1553       m->Recycle();
1554       omRelease(Self, m, true);
1555       m = NULL;
1556       continue;
1557       // interference - the markword changed - just retry.
1558       // The state-transitions are one-way, so there's no chance of
1559       // live-lock -- "Inflated" is an absorbing state.
1560     }
1561 
1562     // Hopefully the performance counters are allocated on distinct
1563     // cache lines to avoid false sharing on MP systems ...
1564     OM_PERFDATA_OP(Inflations, inc());
1565     TEVENT(Inflate: overwrite neutral);
1566     if (log_is_enabled(Debug, monitorinflation)) {
1567       if (object->is_instance()) {
1568         ResourceMark rm;
1569         log_debug(monitorinflation)("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
1570                                     p2i(object), p2i(object->mark()),

