
src/hotspot/share/runtime/synchronizer.cpp

rev 49250 : [mq]: JDK-8199781.patch ("Don't use naked == for comparing oops")


 153 // That is, we find a notify() or notifyAll() call that immediately precedes
 154 // the monitorexit operation.  In that case the JIT could fuse the operations
 155 // into a single notifyAndExit() runtime primitive.
 156 
 157 bool ObjectSynchronizer::quick_notify(oopDesc * obj, Thread * self, bool all) {
 158   assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
 159   assert(self->is_Java_thread(), "invariant");
 160   assert(((JavaThread *) self)->thread_state() == _thread_in_Java, "invariant");
 161   NoSafepointVerifier nsv;
 162   if (obj == NULL) return false;  // slow-path for invalid obj
 163   const markOop mark = obj->mark();
 164 
 165   if (mark->has_locker() && self->is_lock_owned((address)mark->locker())) {
 166     // Degenerate notify: the object is stack-locked by the caller,
 167     // so by definition the implied waitset is empty.
 168     return true;
 169   }
 170 
 171   if (mark->has_monitor()) {
 172     ObjectMonitor * const mon = mark->monitor();
 173     assert(mon->object() == obj, "invariant");
 174   if (mon->owner() != self) return false;  // slow-path throws IllegalMonitorStateException
 175 
 176     if (mon->first_waiter() != NULL) {
 177       // We have one or more waiters. Since this is an inflated monitor
 178       // that we own, we can transfer one or more threads from the waitset
 179       // to the entrylist here and now, avoiding the slow-path.
 180       if (all) {
 181         DTRACE_MONITOR_PROBE(notifyAll, mon, obj, self);
 182       } else {
 183         DTRACE_MONITOR_PROBE(notify, mon, obj, self);
 184       }
 185       int tally = 0;
 186       do {
 187         mon->INotify(self);
 188         ++tally;
 189       } while (mon->first_waiter() != NULL && all);
 190       OM_PERFDATA_OP(Notifications, inc(tally));
 191     }
 192     return true;
 193   }


 197 }
 198 
 199 
 200 // The LockNode emitted directly at the synchronization site would be
 201 // too big if it had to include support for the cases of inflated
 202 // recursive enter and exit, so those cases are handled here instead.
 203 // Note that we can't safely call AsyncPrintJavaStack() from within
 204 // quick_enter() as our thread state remains _in_Java.
 205 
 206 bool ObjectSynchronizer::quick_enter(oop obj, Thread * Self,
 207                                      BasicLock * lock) {
 208   assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
 209   assert(Self->is_Java_thread(), "invariant");
 210   assert(((JavaThread *) Self)->thread_state() == _thread_in_Java, "invariant");
 211   NoSafepointVerifier nsv;
 212   if (obj == NULL) return false;       // Need to throw NPE
 213   const markOop mark = obj->mark();
 214 
 215   if (mark->has_monitor()) {
 216     ObjectMonitor * const m = mark->monitor();
 217     assert(m->object() == obj, "invariant");
 218     Thread * const owner = (Thread *) m->_owner;
 219 
 220     // Lock contention and Transactional Lock Elision (TLE) diagnostics
 221     // and observability
 222     // Case: light contention possibly amenable to TLE
 223     // Case: TLE inimical operations such as nested/recursive synchronization
 224 
 225     if (owner == Self) {
 226       m->_recursions++;
 227       return true;
 228     }
 229 
 230     // This Java Monitor is inflated so obj's header will never be
 231     // displaced to this thread's BasicLock. Make the displaced header
 232     // non-NULL so this BasicLock is not seen as recursive nor as
 233     // being locked. We do this unconditionally so that this thread's
 234     // BasicLock cannot be mis-interpreted by any stack walkers. For
 235     // performance reasons, stack walkers generally first check for
 236     // Biased Locking in the object's header, the second check is for
 237     // stack-locking in the object's header, the third check is for


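The hunk above breaks off mid-comment; the elided remainder of quick_enter() applies exactly the trick the comment describes. The following is a schematic sketch of that idea, not the verbatim elided code: it assumes the markOopDesc::unused_mark() sentinel and the Atomic::replace_if_null() CAS wrapper that this vintage of HotSpot provides.

    // Sketch: fast path for entering an already-inflated monitor.
    static bool quick_enter_inflated_sketch(ObjectMonitor* m, Thread* Self,
                                            BasicLock* lock) {
      Thread* const owner = (Thread*) m->_owner;
      if (owner == Self) {       // recursive enter: just bump the count
        m->_recursions++;
        return true;
      }
      // Stamp the on-stack BasicLock with a non-NULL sentinel so stack
      // walkers can never mistake it for a recursive or owning stack-lock.
      lock->set_displaced_header(markOopDesc::unused_mark());
      // One CAS on _owner claims an unowned monitor; anything else is
      // real contention and must take the slow path.
      if (owner == NULL && Atomic::replace_if_null(Self, &(m->_owner))) {
        return true;
      }
      return false;              // caller falls back to the slow path
    }
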
1384   assert(Universe::verify_in_progress() ||
1385          !SafepointSynchronize::is_at_safepoint(), "invariant");
1386 
1387   EventJavaMonitorInflate event;
1388 
1389   for (;;) {
1390     const markOop mark = object->mark();
1391     assert(!mark->has_bias_pattern(), "invariant");
1392 
1393     // The mark can be in one of the following states:
1394     // *  Inflated     - just return
1395     // *  Stack-locked - coerce it to inflated
1396     // *  INFLATING    - busy wait for conversion to complete
 1397     // *  Neutral      - aggressively inflate the object
 1398     // *  BIASED       - illegal; we should never see this
1399 
1400     // CASE: inflated
1401     if (mark->has_monitor()) {
1402       ObjectMonitor * inf = mark->monitor();
1403       assert(inf->header()->is_neutral(), "invariant");
1404       assert(inf->object() == object, "invariant");
1405       assert(ObjectSynchronizer::verify_objmon_isinpool(inf), "monitor is invalid");
1406       return inf;
1407     }
1408 
1409     // CASE: inflation in progress - inflating over a stack-lock.
1410     // Some other thread is converting from stack-locked to inflated.
1411     // Only that thread can complete inflation -- other threads must wait.
1412     // The INFLATING value is transient.
1413     // Currently, we spin/yield/park and poll the markword, waiting for inflation to finish.
1414     // We could always eliminate polling by parking the thread on some auxiliary list.
1415     if (mark == markOopDesc::INFLATING()) {
1416       TEVENT(Inflate: spin while INFLATING);
1417       ReadStableMark(object);
1418       continue;
1419     }
1420 
1421     // CASE: stack-locked
1422     // Could be stack-locked either by this thread or by some other thread.
1423     //
 1424     // Note that we allocate the ObjectMonitor speculatively, _before_ attempting




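inflate() is likewise cut off just as it reaches the stack-locked case. As a sketch of the speculative-allocation scheme the last comment describes (helper names such as omAlloc, omRelease, and mark_addr() are taken from this vintage of the file; the body is schematic, not the verbatim code):

    // Sketch: allocate the ObjectMonitor before trying to install the
    // transient INFLATING marker; losing the CAS just recycles it.
    static ObjectMonitor* inflate_stack_locked_sketch(Thread* Self, oop object,
                                                      markOop mark) {
      ObjectMonitor* m = ObjectSynchronizer::omAlloc(Self);
      markOop cmp = Atomic::cmpxchg(markOopDesc::INFLATING(),
                                    object->mark_addr(), mark);
      if (cmp != mark) {
        ObjectSynchronizer::omRelease(Self, m, true);  // lost the race
        return NULL;                                   // caller retries the loop
      }
      // Winner: copy the displaced header into m, publish m in the mark
      // word, and return it (those steps are elided here).
      return m;
    }

The listing now repeats the same three hunks in their patched form; apart from the oop-comparison asserts, the code is unchanged.
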
 153 // That is, we find a notify() or notifyAll() call that immediately precedes
 154 // the monitorexit operation.  In that case the JIT could fuse the operations
 155 // into a single notifyAndExit() runtime primitive.
 156 
 157 bool ObjectSynchronizer::quick_notify(oopDesc * obj, Thread * self, bool all) {
 158   assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
 159   assert(self->is_Java_thread(), "invariant");
 160   assert(((JavaThread *) self)->thread_state() == _thread_in_Java, "invariant");
 161   NoSafepointVerifier nsv;
 162   if (obj == NULL) return false;  // slow-path for invalid obj
 163   const markOop mark = obj->mark();
 164 
 165   if (mark->has_locker() && self->is_lock_owned((address)mark->locker())) {
 166     // Degenerate notify: the object is stack-locked by the caller,
 167     // so by definition the implied waitset is empty.
 168     return true;
 169   }
 170 
 171   if (mark->has_monitor()) {
 172     ObjectMonitor * const mon = mark->monitor();
 173     assert(oopDesc::equals((oop) mon->object(), obj), "invariant");
 174   if (mon->owner() != self) return false;  // slow-path throws IllegalMonitorStateException
 175 
 176     if (mon->first_waiter() != NULL) {
 177       // We have one or more waiters. Since this is an inflated monitor
 178       // that we own, we can transfer one or more threads from the waitset
 179       // to the entrylist here and now, avoiding the slow-path.
 180       if (all) {
 181         DTRACE_MONITOR_PROBE(notifyAll, mon, obj, self);
 182       } else {
 183         DTRACE_MONITOR_PROBE(notify, mon, obj, self);
 184       }
 185       int tally = 0;
 186       do {
 187         mon->INotify(self);
 188         ++tally;
 189       } while (mon->first_waiter() != NULL && all);
 190       OM_PERFDATA_OP(Notifications, inc(tally));
 191     }
 192     return true;
 193   }


 197 }
 198 
 199 
 200 // The LockNode emitted directly at the synchronization site would be
 201 // too big if it had to include support for the cases of inflated
 202 // recursive enter and exit, so those cases are handled here instead.
 203 // Note that we can't safely call AsyncPrintJavaStack() from within
 204 // quick_enter() as our thread state remains _in_Java.
 205 
 206 bool ObjectSynchronizer::quick_enter(oop obj, Thread * Self,
 207                                      BasicLock * lock) {
 208   assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
 209   assert(Self->is_Java_thread(), "invariant");
 210   assert(((JavaThread *) Self)->thread_state() == _thread_in_Java, "invariant");
 211   NoSafepointVerifier nsv;
 212   if (obj == NULL) return false;       // Need to throw NPE
 213   const markOop mark = obj->mark();
 214 
 215   if (mark->has_monitor()) {
 216     ObjectMonitor * const m = mark->monitor();
 217     assert(oopDesc::equals((oop) m->object(), obj), "invariant");
 218     Thread * const owner = (Thread *) m->_owner;
 219 
 220     // Lock contention and Transactional Lock Elision (TLE) diagnostics
 221     // and observability
 222     // Case: light contention possibly amenable to TLE
 223     // Case: TLE inimical operations such as nested/recursive synchronization
 224 
 225     if (owner == Self) {
 226       m->_recursions++;
 227       return true;
 228     }
 229 
 230     // This Java Monitor is inflated so obj's header will never be
 231     // displaced to this thread's BasicLock. Make the displaced header
 232     // non-NULL so this BasicLock is not seen as recursive nor as
 233     // being locked. We do this unconditionally so that this thread's
 234     // BasicLock cannot be mis-interpreted by any stack walkers. For
 235     // performance reasons, stack walkers generally first check for
 236     // Biased Locking in the object's header, the second check is for
 237     // stack-locking in the object's header, the third check is for


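As the patched hunks show, the only change JDK-8199781 makes here is to the asserts: naked pointer comparisons of the form mon->object() == obj become oopDesc::equals((oop) mon->object(), obj). The motivation is that oop equality has to be mediated by the GC interface: a collector such as Shenandoah may have two distinct pointers (a from-space and a to-space copy) naming the same object, so even an equality test needs a barrier. A minimal sketch of the shape of such a helper follows; the real oopDesc::equals dispatches through the Access API, and resolve_forwarded() below is a hypothetical stand-in for the GC's resolution step.

    // Schematic GC-aware oop equality (resolve_forwarded is hypothetical).
    static bool oop_equals_sketch(oop o1, oop o2) {
      // For non-relocating collectors this degenerates to plain pointer
      // equality; a Shenandoah-style GC first resolves each side to its
      // canonical copy before comparing.
      return resolve_forwarded(o1) == resolve_forwarded(o2);
    }

The asserts are converted along with product code so that debug builds exercise the same comparison discipline.
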
1384   assert(Universe::verify_in_progress() ||
1385          !SafepointSynchronize::is_at_safepoint(), "invariant");
1386 
1387   EventJavaMonitorInflate event;
1388 
1389   for (;;) {
1390     const markOop mark = object->mark();
1391     assert(!mark->has_bias_pattern(), "invariant");
1392 
1393     // The mark can be in one of the following states:
1394     // *  Inflated     - just return
1395     // *  Stack-locked - coerce it to inflated
1396     // *  INFLATING    - busy wait for conversion to complete
 1397     // *  Neutral      - aggressively inflate the object
 1398     // *  BIASED       - illegal; we should never see this
1399 
1400     // CASE: inflated
1401     if (mark->has_monitor()) {
1402       ObjectMonitor * inf = mark->monitor();
1403       assert(inf->header()->is_neutral(), "invariant");
1404       assert(oopDesc::equals((oop) inf->object(), object), "invariant");
1405       assert(ObjectSynchronizer::verify_objmon_isinpool(inf), "monitor is invalid");
1406       return inf;
1407     }
1408 
1409     // CASE: inflation in progress - inflating over a stack-lock.
1410     // Some other thread is converting from stack-locked to inflated.
1411     // Only that thread can complete inflation -- other threads must wait.
1412     // The INFLATING value is transient.
1413     // Currently, we spin/yield/park and poll the markword, waiting for inflation to finish.
1414     // We could always eliminate polling by parking the thread on some auxiliary list.
1415     if (mark == markOopDesc::INFLATING()) {
1416       TEVENT(Inflate: spin while INFLATING);
1417       ReadStableMark(object);
1418       continue;
1419     }
1420 
1421     // CASE: stack-locked
1422     // Could be stack-locked either by this thread or by some other thread.
1423     //
 1424     // Note that we allocate the ObjectMonitor speculatively, _before_ attempting


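Finally, ReadStableMark(), called in the INFLATING case above, is the poll loop the comment alludes to. A simplified sketch of such a spin/yield poll follows (SpinPause() and os::naked_yield() are existing HotSpot primitives; the real function additionally backs off onto striped inflation locks to park long waiters):

    // Sketch: poll a mark word stuck at the transient INFLATING value.
    static markOop read_stable_mark_sketch(oop obj) {
      markOop mark = obj->mark();
      int its = 0;
      while (mark == markOopDesc::INFLATING()) {
        if (++its < 100) {
          SpinPause();        // inflation is brief: spin a little first
        } else {
          os::naked_yield();  // then yield so the inflating thread can run
        }
        mark = obj->mark();
      }
      return mark;
    }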