
src/hotspot/share/runtime/synchronizer.cpp

rev 51675 : imported patch 8210514


 303         // This is a special case where the Java Monitor was inflated
 304         // after this thread entered the stack-lock recursively. When a
 305         // Java Monitor is inflated, we cannot safely walk the Java
 306         // Monitor owner's stack and update the BasicLocks because a
 307         // Java Monitor can be asynchronously inflated by a thread that
 308         // does not own the Java Monitor.
 309         ObjectMonitor * m = mark->monitor();
 310         assert(((oop)(m->object()))->mark() == mark, "invariant");
 311         assert(m->is_entered(THREAD), "invariant");
 312       }
 313     }
 314 #endif
 315     return;
 316   }
 317 
 318   if (mark == (markOop) lock) {
 319     // If the object is stack-locked by the current thread, try to
 320     // swing the displaced header from the BasicLock back to the mark.
 321     assert(dhw->is_neutral(), "invariant");
 322     if (object->cas_set_mark(dhw, mark) == mark) {
 323       TEVENT(fast_exit: release stack-lock);
 324       return;
 325     }
 326   }
 327 
 328   // We have to take the slow-path of possible inflation and then exit.
 329   ObjectSynchronizer::inflate(THREAD,
 330                               object,
 331                               inflate_cause_vm_internal)->exit(true, THREAD);
 332 }
 333 
 334 // -----------------------------------------------------------------------------
 335 // Interpreter/Compiler Slow Case
 336 // This routine is used to handle the interpreter/compiler slow case.
 337 // We don't need to use the fast path here, because it must have
 338 // failed in the interpreter/compiler code.
 339 void ObjectSynchronizer::slow_enter(Handle obj, BasicLock* lock, TRAPS) {
 340   markOop mark = obj->mark();
 341   assert(!mark->has_bias_pattern(), "should not see bias pattern here");
 342 
 343   if (mark->is_neutral()) {
 344     // Anticipate successful CAS -- the ST of the displaced mark must
 345     // be visible <= the ST performed by the CAS.
 346     lock->set_displaced_header(mark);
 347     if (mark == obj()->cas_set_mark((markOop) lock, mark)) {
 348       TEVENT(slow_enter: release stacklock);
 349       return;
 350     }
 351     // Fall through to inflate() ...
 352   } else if (mark->has_locker() &&
 353              THREAD->is_lock_owned((address)mark->locker())) {
 354     assert(lock != mark->locker(), "must not re-lock the same lock");
 355     assert(lock != (BasicLock*)obj->mark(), "don't relock with same BasicLock");
 356     lock->set_displaced_header(NULL);
 357     return;
 358   }
 359 
 360   // The object header will never be displaced to this lock,
 361   // so it does not matter what the value is, except that it
 362   // must be non-zero to avoid looking like a re-entrant lock,
 363   // and must not look locked either.
 364   lock->set_displaced_header(markOopDesc::unused_mark());
 365   ObjectSynchronizer::inflate(THREAD,
 366                               obj(),
 367                               inflate_cause_monitor_enter)->enter(THREAD);
 368 }


 371 // We don't need to use the fast path here, because it must have
 372 // failed in the interpreter/compiler code. Simply using the heavy
 373 // weight monitor should be ok, unless someone finds otherwise.
 374 void ObjectSynchronizer::slow_exit(oop object, BasicLock* lock, TRAPS) {
 375   fast_exit(object, lock, THREAD);
 376 }
 377 
 378 // -----------------------------------------------------------------------------
 379 // Class Loader support to work around deadlocks on the class loader lock objects
 380 // Also used by GC
 381 // complete_exit()/reenter() are used to wait on a nested lock
 382 // i.e. to give up an outer lock completely and then re-enter
 383 // Used when holding nested locks - lock acquisition order: lock1 then lock2
 384 //  1) complete_exit lock1 - saving recursion count
 385 //  2) wait on lock2
 386 //  3) when notified on lock2, unlock lock2
 387 //  4) reenter lock1 with original recursion count
 388 //  5) lock lock2
 389 // NOTE: must use heavy weight monitor to handle complete_exit/reenter()
 390 intptr_t ObjectSynchronizer::complete_exit(Handle obj, TRAPS) {
 391   TEVENT(complete_exit);
 392   if (UseBiasedLocking) {
 393     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
 394     assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
 395   }
 396 
 397   ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD,
 398                                                        obj(),
 399                                                        inflate_cause_vm_internal);
 400 
 401   return monitor->complete_exit(THREAD);
 402 }
 403 
 404 // NOTE: must use heavy weight monitor to handle complete_exit/reenter()
 405 void ObjectSynchronizer::reenter(Handle obj, intptr_t recursion, TRAPS) {
 406   TEVENT(reenter);
 407   if (UseBiasedLocking) {
 408     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
 409     assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
 410   }
 411 
 412   ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD,
 413                                                        obj(),
 414                                                        inflate_cause_vm_internal);
 415 
 416   monitor->reenter(recursion, THREAD);
 417 }
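
The following is an illustrative sketch, not part of this file or this patch, showing how a caller could follow the five-step protocol described above; lock1 and lock2 are hypothetical Handles the thread owns, acquired in that order.

  // 1) give up lock1 completely, saving its recursion count
  intptr_t recursions = ObjectSynchronizer::complete_exit(lock1, THREAD);
  // 2)/3) wait on lock2 (which this thread still owns) until notified
  ObjectSynchronizer::wait(lock2, 0, THREAD);
  // ... monitorexit lock2 here, so lock1 can be reacquired first ...
  // 4) reacquire lock1 at the saved recursion count
  ObjectSynchronizer::reenter(lock1, recursions, THREAD);
  // 5) monitorenter lock2 again, preserving the lock1 -> lock2 order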
 418 // -----------------------------------------------------------------------------
 419 // JNI locks on java objects
 420 // NOTE: must use heavy weight monitor to handle jni monitor enter
 421 void ObjectSynchronizer::jni_enter(Handle obj, TRAPS) {
 422   // the current locking is from JNI instead of Java code
 423   TEVENT(jni_enter);
 424   if (UseBiasedLocking) {
 425     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
 426     assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
 427   }
 428   THREAD->set_current_pending_monitor_is_from_java(false);
 429   ObjectSynchronizer::inflate(THREAD, obj(), inflate_cause_jni_enter)->enter(THREAD);
 430   THREAD->set_current_pending_monitor_is_from_java(true);
 431 }
 432 
 433 // NOTE: must use heavy weight monitor to handle jni monitor exit
 434 void ObjectSynchronizer::jni_exit(oop obj, Thread* THREAD) {
 435   TEVENT(jni_exit);
 436   if (UseBiasedLocking) {
 437     Handle h_obj(THREAD, obj);
 438     BiasedLocking::revoke_and_rebias(h_obj, false, THREAD);
 439     obj = h_obj();
 440   }
 441   assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
 442 
 443   ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD,
 444                                                        obj,
 445                                                        inflate_cause_jni_exit);
 446   // If this thread has locked the object, exit the monitor.  Note:  can't use
 447   // monitor->check(CHECK); must exit even if an exception is pending.
 448   if (monitor->check(THREAD)) {
 449     monitor->exit(true, THREAD);
 450   }
 451 }
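
An illustrative sketch, not part of this patch: how a JNI MonitorEnter/MonitorExit pair maps onto the helpers above; h_obj is a hypothetical Handle already in scope.

  ObjectSynchronizer::jni_enter(h_obj, THREAD);    // always inflates; may block on the heavyweight monitor
  // ... critical section guarded by the ObjectMonitor ...
  ObjectSynchronizer::jni_exit(h_obj(), THREAD);   // exits even if an exception is pending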
 452 
 453 // -----------------------------------------------------------------------------
 454 // Internal VM locks on java objects
 455 // standard constructor, allows locking failures
 456 ObjectLocker::ObjectLocker(Handle obj, Thread* thread, bool doLock) {
 457   _dolock = doLock;
 458   _thread = thread;
 459   debug_only(if (StrictSafepointChecks) _thread->check_for_valid_safepoint_state(false);)
 460   _obj = obj;
 461 
 462   if (_dolock) {
 463     TEVENT(ObjectLocker);
 464 
 465     ObjectSynchronizer::fast_enter(_obj, &_lock, false, _thread);
 466   }
 467 }
 468 
 469 ObjectLocker::~ObjectLocker() {
 470   if (_dolock) {
 471     ObjectSynchronizer::fast_exit(_obj(), &_lock, _thread);
 472   }
 473 }
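
An illustrative sketch, not part of this patch: typical RAII use of ObjectLocker for a VM-internal lock on a Java object; h_obj is a hypothetical Handle the caller already holds.

  {
    ObjectLocker ol(h_obj, THREAD, true);   // fast_enter() runs in the constructor
    // ... operate on the object while it is locked ...
  }                                         // fast_exit() runs in the destructor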
 474 
 475 
 476 // -----------------------------------------------------------------------------
 477 //  Wait/Notify/NotifyAll
 478 // NOTE: must use heavy weight monitor to handle wait()
 479 int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
 480   if (UseBiasedLocking) {
 481     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
 482     assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
 483   }
 484   if (millis < 0) {
 485     TEVENT(wait - throw IAX);
 486     THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
 487   }
 488   ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD,
 489                                                        obj(),
 490                                                        inflate_cause_wait);
 491 
 492   DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), THREAD, millis);
 493   monitor->wait(millis, true, THREAD);
 494 
 495   // This dummy call is in place to get around dtrace bug 6254741.  Once
 496   // that's fixed we can uncomment the following line, remove the call
 497   // and change this function back into a "void" func.
 498   // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
 499   return dtrace_waited_probe(monitor, obj, THREAD);
 500 }
 501 
 502 void ObjectSynchronizer::waitUninterruptibly(Handle obj, jlong millis, TRAPS) {
 503   if (UseBiasedLocking) {
 504     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
 505     assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
 506   }
 507   if (millis < 0) {
 508     TEVENT(wait - throw IAX);
 509     THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
 510   }
 511   ObjectSynchronizer::inflate(THREAD,
 512                               obj(),
 513                               inflate_cause_wait)->wait(millis, false, THREAD);
 514 }
 515 
 516 void ObjectSynchronizer::notify(Handle obj, TRAPS) {
 517   if (UseBiasedLocking) {
 518     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
 519     assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
 520   }
 521 
 522   markOop mark = obj->mark();
 523   if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
 524     return;
 525   }
 526   ObjectSynchronizer::inflate(THREAD,
 527                               obj(),
 528                               inflate_cause_notify)->notify(THREAD);


 591   for (;;) {
 592     markOop mark = obj->mark();
 593     if (!mark->is_being_inflated()) {
 594       return mark;    // normal fast-path return
 595     }
 596 
 597     // The object is being inflated by some other thread.
 598     // The caller of ReadStableMark() must wait for inflation to complete.
 599     // Avoid live-lock
 600     // TODO: consider calling SafepointSynchronize::do_call_back() while
 601     // spinning to see if there's a safepoint pending.  If so, immediately
 602     // yielding or blocking would be appropriate.  Avoid spinning while
 603     // there is a safepoint pending.
 604     // TODO: add inflation contention performance counters.
 605     // TODO: restrict the aggregate number of spinners.
 606 
 607     ++its;
 608     if (its > 10000 || !os::is_MP()) {
 609       if (its & 1) {
 610         os::naked_yield();
 611         TEVENT(Inflate: INFLATING - yield);
 612       } else {
 613         // Note that the following code attenuates the livelock problem but is not
 614         // a complete remedy.  A more complete solution would require that the inflating
 615         // thread hold the associated inflation lock.  The following code simply restricts
 616         // the number of spinners to at most one.  We'll have N-2 threads blocked
 617         // on the inflation lock, 1 thread holding the inflation lock and using
 618         // a yield/park strategy, and 1 thread in the midst of inflation.
 619         // A more refined approach would be to change the encoding of INFLATING
 620         // to allow encapsulation of a native thread pointer.  Threads waiting for
 621         // inflation to complete would use CAS to push themselves onto a singly linked
 622         // list rooted at the markword.  Once enqueued, they'd loop, checking a per-thread flag
 623         // and calling park().  When inflation was complete the thread that accomplished inflation
 624         // would detach the list and set the markword to inflated with a single CAS and
 625         // then for each thread on the list, set the flag and unpark() the thread.
 626         // This is conceptually similar to muxAcquire-muxRelease, except that muxRelease
 627         // wakes at most one thread whereas we need to wake the entire list.
 628         int ix = (cast_from_oop<intptr_t>(obj) >> 5) & (NINFLATIONLOCKS-1);
 629         int YieldThenBlock = 0;
 630         assert(ix >= 0 && ix < NINFLATIONLOCKS, "invariant");
 631         assert((NINFLATIONLOCKS & (NINFLATIONLOCKS-1)) == 0, "invariant");
 632         Thread::muxAcquire(gInflationLocks + ix, "gInflationLock");
 633         while (obj->mark() == markOopDesc::INFLATING()) {
 634           // Beware: os::naked_yield() is advisory and has almost no effect on some platforms
 635           // so we periodically call Self->_ParkEvent->park(1).
 636           // We use a mixed spin/yield/block mechanism.
 637           if ((YieldThenBlock++) >= 16) {
 638             Thread::current()->_ParkEvent->park(1);
 639           } else {
 640             os::naked_yield();
 641           }
 642         }
 643         Thread::muxRelease(gInflationLocks + ix);
 644         TEVENT(Inflate: INFLATING - yield/park);
 645       }
 646     } else {
 647       SpinPause();       // SMP-polite spinning
 648     }
 649   }
 650 }
 651 
 652 // hashCode() generation :
 653 //
 654 // Possibilities:
 655 // * MD5Digest of {obj,stwRandom}
 656 // * CRC32 of {obj,stwRandom} or any linear-feedback shift register function.
 657 // * A DES- or AES-style SBox[] mechanism
 658 // * One of the Phi-based schemes, such as:
 659 //   2654435761 = 2^32 * Phi (golden ratio)
 660 //   HashCodeValue = ((uintptr_t(obj) >> 3) * 2654435761) ^ GVars.stwRandom ;
 661 // * A variation of Marsaglia's shift-xor RNG scheme.
 662 // * (obj ^ stwRandom) is appealing, but can result
 663 //   in undesirable regularity in the hashCode values of adjacent objects
 664 //   (objects allocated back-to-back, in particular).  This could potentially


 686   } else if (hashCode == 4) {
 687     value = cast_from_oop<intptr_t>(obj);
 688   } else {
 689     // Marsaglia's xor-shift scheme with thread-specific state
 690     // This is probably the best overall implementation -- we'll
 691     // likely make this the default in future releases.
 692     unsigned t = Self->_hashStateX;
 693     t ^= (t << 11);
 694     Self->_hashStateX = Self->_hashStateY;
 695     Self->_hashStateY = Self->_hashStateZ;
 696     Self->_hashStateZ = Self->_hashStateW;
 697     unsigned v = Self->_hashStateW;
 698     v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));
 699     Self->_hashStateW = v;
 700     value = v;
 701   }
 702 
 703   value &= markOopDesc::hash_mask;
 704   if (value == 0) value = 0xBAD;
 705   assert(value != markOopDesc::no_hash, "invariant");
 706   TEVENT(hashCode: GENERATE);
 707   return value;
 708 }
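
An illustrative sketch, not part of this patch: the same Marsaglia xorshift recurrence written as a free-standing function, to make the state rotation above easier to follow (x, y, z, w stand in for _hashStateX ... _hashStateW).

  static unsigned xorshift128(unsigned& x, unsigned& y, unsigned& z, unsigned& w) {
    unsigned t = x;
    t ^= (t << 11);
    x = y; y = z; z = w;                      // rotate the 128 bits of state
    w = (w ^ (w >> 19)) ^ (t ^ (t >> 8));     // mix the oldest word with the newest
    return w;                                 // raw value, before hash_mask is applied
  }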
 709 
 710 intptr_t ObjectSynchronizer::FastHashCode(Thread * Self, oop obj) {
 711   if (UseBiasedLocking) {
 712     // NOTE: many places throughout the JVM do not expect a safepoint
 713     // to be taken here, in particular most operations on perm gen
 714     // objects. However, we only ever bias Java instances and all of
 715     // the call sites of identity_hash that might revoke biases have
 716     // been checked to make sure they can handle a safepoint. The
 717     // added check of the bias pattern is to avoid useless calls to
 718     // thread-local storage.
 719     if (obj->mark()->has_bias_pattern()) {
 720       // Handle for oop obj in case of STW safepoint
 721       Handle hobj(Self, obj);
 722       // Relaxing assertion for bug 6320749.
 723       assert(Universe::verify_in_progress() ||
 724              !SafepointSynchronize::is_at_safepoint(),
 725              "biases should not be seen by VM thread here");
 726       BiasedLocking::revoke_and_rebias(hobj, false, JavaThread::current());


1137     // If the muxTry() fails then drop immediately into case 3.
1138     // If we're using thread-local free lists then try
1139     // to reprovision the caller's free list.
1140     if (gFreeList != NULL) {
1141       // Reprovision the thread's omFreeList.
1142       // Use bulk transfers to reduce the allocation rate and heat
1143       // on various locks.
1144       Thread::muxAcquire(&gListLock, "omAlloc");
1145       for (int i = Self->omFreeProvision; --i >= 0 && gFreeList != NULL;) {
1146         gMonitorFreeCount--;
1147         ObjectMonitor * take = gFreeList;
1148         gFreeList = take->FreeNext;
1149         guarantee(take->object() == NULL, "invariant");
1150         guarantee(!take->is_busy(), "invariant");
1151         take->Recycle();
1152         omRelease(Self, take, false);
1153       }
1154       Thread::muxRelease(&gListLock);
1155       Self->omFreeProvision += 1 + (Self->omFreeProvision/2);
1156       if (Self->omFreeProvision > MAXPRIVATE) Self->omFreeProvision = MAXPRIVATE;
1157       TEVENT(omFirst - reprovision);
1158 
1159       const int mx = MonitorBound;
1160       if (mx > 0 && (gMonitorPopulation-gMonitorFreeCount) > mx) {
1161         // We can't safely induce a STW safepoint from omAlloc() as our thread
1162         // state may not be appropriate for such activities and callers may hold
1163         // naked oops, so instead we defer the action.
1164         InduceScavenge(Self, "omAlloc");
1165       }
1166       continue;
1167     }
1168 
1169     // 3: allocate a block of new ObjectMonitors
1170     // Both the local and global free lists are empty -- resort to malloc().
1171     // In the current implementation objectMonitors are TSM - immortal.
1172     // Ideally, we'd write "new ObjectMonitor[_BLOCKSIZE]", but we want
1173     // each ObjectMonitor to start at the beginning of a cache line,
1174     // so we use align_up().
1175     // A better solution would be to use C++ placement-new.
1176     // BEWARE: As it stands currently, we don't run the ctors!
1177     assert(_BLOCKSIZE > 1, "invariant");


1215     // list activity.
1216 
1217     // Acquire the gListLock to manipulate gBlockList and gFreeList.
1218     // An Oyama-Taura-Yonezawa scheme might be more efficient.
1219     Thread::muxAcquire(&gListLock, "omAlloc [2]");
1220     gMonitorPopulation += _BLOCKSIZE-1;
1221     gMonitorFreeCount += _BLOCKSIZE-1;
1222 
1223     // Add the new block to the list of extant blocks (gBlockList).
1224     // The very first objectMonitor in a block is reserved and dedicated.
1225     // It serves as blocklist "next" linkage.
1226     temp[0].FreeNext = gBlockList;
1227     // There are lock-free uses of gBlockList so make sure that
1228     // the previous stores happen before we update gBlockList.
1229     OrderAccess::release_store(&gBlockList, temp);
1230 
1231     // Add the new string of objectMonitors to the global free list
1232     temp[_BLOCKSIZE - 1].FreeNext = gFreeList;
1233     gFreeList = temp + 1;
1234     Thread::muxRelease(&gListLock);
1235     TEVENT(Allocate block of monitors);
1236   }
1237 }
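
An illustrative sketch, not part of this patch: a lock-free reader of gBlockList pairs with the release_store() above by loading the head with acquire semantics, then walking the chain through the reserved element [0] of each block (the same load_acquire() of gBlockList appears further down in deflate_idle_monitors()).

  PaddedEnd<ObjectMonitor> * block = OrderAccess::load_acquire(&gBlockList);
  while (block != NULL) {
    for (int i = 1; i < _BLOCKSIZE; i++) {    // element [0] only carries the "next" link
      ObjectMonitor * mid = (ObjectMonitor *) &block[i];
      // ... examine mid; check is_busy()/object() before drawing conclusions ...
    }
    block = (PaddedEnd<ObjectMonitor> *) block->FreeNext;
  }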
1238 
1239 // Place "m" on the caller's private per-thread omFreeList.
1240 // In practice there's no need to clamp or limit the number of
1241 // monitors on a thread's omFreeList as the only time we'll call
1242 // omRelease is to return a monitor to the free list after a CAS
1243 // attempt failed.  This doesn't allow unbounded #s of monitors to
1244 // accumulate on a thread's free list.
1245 //
1246 // Key constraint: all ObjectMonitors on a thread's free list and the global
1247 // free list must have their object field set to null. This prevents the
1248 // scavenger -- deflate_idle_monitors -- from reclaiming them.
1249 
1250 void ObjectSynchronizer::omRelease(Thread * Self, ObjectMonitor * m,
1251                                    bool fromPerThreadAlloc) {
1252   guarantee(m->object() == NULL, "invariant");
1253   guarantee(((m->is_busy()|m->_recursions) == 0), "freeing in-use monitor");
1254   // Remove from omInUseList
1255   if (MonitorInUseLists && fromPerThreadAlloc) {


1300 // ObjectSynchronizer::oops_do() (if it happens after omFlush() and the thread's
1301 // monitors have been transferred to the global in-use list).
1302 
1303 void ObjectSynchronizer::omFlush(Thread * Self) {
1304   ObjectMonitor * list = Self->omFreeList;  // Null-terminated SLL
1305   Self->omFreeList = NULL;
1306   ObjectMonitor * tail = NULL;
1307   int tally = 0;
1308   if (list != NULL) {
1309     ObjectMonitor * s;
1310     // The thread is going away, the per-thread free monitors
1311     // are freed via set_owner(NULL)
1312     // Link them to tail, which will be linked into the global free list
1313     // gFreeList below, under the gListLock
1314     for (s = list; s != NULL; s = s->FreeNext) {
1315       tally++;
1316       tail = s;
1317       guarantee(s->object() == NULL, "invariant");
1318       guarantee(!s->is_busy(), "invariant");
1319       s->set_owner(NULL);   // redundant but good hygiene
1320       TEVENT(omFlush - Move one);
1321     }
1322     guarantee(tail != NULL && list != NULL, "invariant");
1323   }
1324 
1325   ObjectMonitor * inUseList = Self->omInUseList;
1326   ObjectMonitor * inUseTail = NULL;
1327   int inUseTally = 0;
1328   if (inUseList != NULL) {
1329     Self->omInUseList = NULL;
1330     ObjectMonitor *cur_om;
1331     // The thread is going away, however the omInUseList inflated
1332     // monitors may still be in-use by other threads.
1333     // Link them to inUseTail, which will be linked into the global in-use list
1334     // gOmInUseList below, under the gListLock
1335     for (cur_om = inUseList; cur_om != NULL; cur_om = cur_om->FreeNext) {
1336       inUseTail = cur_om;
1337       inUseTally++;
1338     }
1339     assert(Self->omInUseCount == inUseTally, "in-use count off");
1340     Self->omInUseCount = 0;
1341     guarantee(inUseTail != NULL && inUseList != NULL, "invariant");
1342   }
1343 
1344   Thread::muxAcquire(&gListLock, "omFlush");
1345   if (tail != NULL) {
1346     tail->FreeNext = gFreeList;
1347     gFreeList = list;
1348     gMonitorFreeCount += tally;
1349     assert(Self->omFreeCount == tally, "free-count off");
1350     Self->omFreeCount = 0;
1351   }
1352 
1353   if (inUseTail != NULL) {
1354     inUseTail->FreeNext = gOmInUseList;
1355     gOmInUseList = inUseList;
1356     gOmInUseCount += inUseTally;
1357   }
1358 
1359   Thread::muxRelease(&gListLock);
1360   TEVENT(omFlush);
1361 }
1362 
1363 static void post_monitor_inflate_event(EventJavaMonitorInflate* event,
1364                                        const oop obj,
1365                                        ObjectSynchronizer::InflateCause cause) {
1366   assert(event != NULL, "invariant");
1367   assert(event->should_commit(), "invariant");
1368   event->set_monitorClass(obj->klass());
1369   event->set_address((uintptr_t)(void*)obj);
1370   event->set_cause((u1)cause);
1371   event->commit();
1372 }
1373 
1374 // Fast path code shared by multiple functions
1375 ObjectMonitor* ObjectSynchronizer::inflate_helper(oop obj) {
1376   markOop mark = obj->mark();
1377   if (mark->has_monitor()) {
1378     assert(ObjectSynchronizer::verify_objmon_isinpool(mark->monitor()), "monitor is invalid");
1379     assert(mark->monitor()->header()->is_neutral(), "monitor must record a good object header");
1380     return mark->monitor();


1405     // *  INFLATING    - busy wait for conversion to complete
1406     // *  Neutral      - aggressively inflate the object.
1407     // *  BIASED       - Illegal.  We should never see this
1408 
1409     // CASE: inflated
1410     if (mark->has_monitor()) {
1411       ObjectMonitor * inf = mark->monitor();
1412       assert(inf->header()->is_neutral(), "invariant");
1413       assert(oopDesc::equals((oop) inf->object(), object), "invariant");
1414       assert(ObjectSynchronizer::verify_objmon_isinpool(inf), "monitor is invalid");
1415       return inf;
1416     }
1417 
1418     // CASE: inflation in progress - inflating over a stack-lock.
1419     // Some other thread is converting from stack-locked to inflated.
1420     // Only that thread can complete inflation -- other threads must wait.
1421     // The INFLATING value is transient.
1422     // Currently, we spin/yield/park and poll the markword, waiting for inflation to finish.
1423     // We could always eliminate polling by parking the thread on some auxiliary list.
1424     if (mark == markOopDesc::INFLATING()) {
1425       TEVENT(Inflate: spin while INFLATING);
1426       ReadStableMark(object);
1427       continue;
1428     }
1429 
1430     // CASE: stack-locked
1431     // Could be stack-locked either by this thread or by some other thread.
1432     //
1433     // Note that we allocate the objectmonitor speculatively, _before_ attempting
1434     // to install INFLATING into the mark word.  We originally installed INFLATING,
1435     // allocated the objectmonitor, and then finally STed the address of the
1436     // objectmonitor into the mark.  This was correct, but artificially lengthened
1437     // the interval in which INFLATED appeared in the mark, thus increasing
1438     // the odds of inflation contention.
1439     //
1440     // We now use per-thread private objectmonitor free lists.
1441     // These lists are reprovisioned from the global free list outside the
1442     // critical INFLATING...ST interval.  A thread can transfer
1443     // multiple objectmonitors en masse from the global free list to its local free list.
1444     // This reduces coherency traffic and lock contention on the global free list.
1445     // Using such local free lists, it doesn't matter if the omAlloc() call appears


1498       // Setup monitor fields to proper values -- prepare the monitor
1499       m->set_header(dmw);
1500 
1501       // Optimization: if the mark->locker stack address is associated
1502       // with this thread we could simply set m->_owner = Self.
1503       // Note that a thread can inflate an object
1504       // that it has stack-locked -- as might happen in wait() -- directly
1505       // with CAS.  That is, we can avoid the xchg-NULL .... ST idiom.
1506       m->set_owner(mark->locker());
1507       m->set_object(object);
1508       // TODO-FIXME: assert BasicLock->dhw != 0.
1509 
1510       // Must preserve store ordering. The monitor state must
1511       // be stable at the time of publishing the monitor address.
1512       guarantee(object->mark() == markOopDesc::INFLATING(), "invariant");
1513       object->release_set_mark(markOopDesc::encode(m));
1514 
1515       // Hopefully the performance counters are allocated on distinct cache lines
1516       // to avoid false sharing on MP systems ...
1517       OM_PERFDATA_OP(Inflations, inc());
1518       TEVENT(Inflate: overwrite stacklock);
1519       if (log_is_enabled(Debug, monitorinflation)) {
1520         if (object->is_instance()) {
1521           ResourceMark rm;
1522           log_debug(monitorinflation)("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
1523                                       p2i(object), p2i(object->mark()),
1524                                       object->klass()->external_name());
1525         }
1526       }
1527       if (event.should_commit()) {
1528         post_monitor_inflate_event(&event, object, cause);
1529       }
1530       return m;
1531     }
1532 
1533     // CASE: neutral
1534     // TODO-FIXME: for entry we currently inflate and then try to CAS _owner.
1535     // If we know we're inflating for entry it's better to inflate by swinging a
1536     // pre-locked objectMonitor pointer into the object header.   A successful
1537     // CAS inflates the object *and* confers ownership to the inflating thread.
1538     // In the current implementation we use a 2-step mechanism where we CAS()


1549     m->set_object(object);
1550     m->_recursions   = 0;
1551     m->_Responsible  = NULL;
1552     m->_SpinDuration = ObjectMonitor::Knob_SpinLimit;       // consider: keep metastats by type/class
1553 
1554     if (object->cas_set_mark(markOopDesc::encode(m), mark) != mark) {
1555       m->set_object(NULL);
1556       m->set_owner(NULL);
1557       m->Recycle();
1558       omRelease(Self, m, true);
1559       m = NULL;
1560       continue;
1561       // interference - the markword changed - just retry.
1562       // The state-transitions are one-way, so there's no chance of
1563       // live-lock -- "Inflated" is an absorbing state.
1564     }
1565 
1566     // Hopefully the performance counters are allocated on distinct
1567     // cache lines to avoid false sharing on MP systems ...
1568     OM_PERFDATA_OP(Inflations, inc());
1569     TEVENT(Inflate: overwrite neutral);
1570     if (log_is_enabled(Debug, monitorinflation)) {
1571       if (object->is_instance()) {
1572         ResourceMark rm;
1573         log_debug(monitorinflation)("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
1574                                     p2i(object), p2i(object->mark()),
1575                                     object->klass()->external_name());
1576       }
1577     }
1578     if (event.should_commit()) {
1579       post_monitor_inflate_event(&event, object, cause);
1580     }
1581     return m;
1582   }
1583 }
1584 
1585 
1586 // Deflate_idle_monitors() is called at all safepoints, immediately
1587 // after all mutators are stopped, but before any objects have moved.
1588 // It traverses the list of known monitors, deflating where possible.
1589 // The scavenged monitors are returned to the monitor free list.


1616 };
1617 
1618 // Deflate a single monitor if not in-use
1619 // Return true if deflated, false if in-use
1620 bool ObjectSynchronizer::deflate_monitor(ObjectMonitor* mid, oop obj,
1621                                          ObjectMonitor** freeHeadp,
1622                                          ObjectMonitor** freeTailp) {
1623   bool deflated;
1624   // Normal case ... The monitor is associated with obj.
1625   guarantee(obj->mark() == markOopDesc::encode(mid), "invariant");
1626   guarantee(mid == obj->mark()->monitor(), "invariant");
1627   guarantee(mid->header()->is_neutral(), "invariant");
1628 
1629   if (mid->is_busy()) {
1630     if (ClearResponsibleAtSTW) mid->_Responsible = NULL;
1631     deflated = false;
1632   } else {
1633     // Deflate the monitor if it is no longer being used
1634     // It's idle - scavenge and return to the global free list
1635     // plain old deflation ...
1636     TEVENT(deflate_idle_monitors - scavenge1);
1637     if (log_is_enabled(Debug, monitorinflation)) {
1638       if (obj->is_instance()) {
1639         ResourceMark rm;
1640         log_debug(monitorinflation)("Deflating object " INTPTR_FORMAT " , "
1641                                     "mark " INTPTR_FORMAT " , type %s",
1642                                     p2i(obj), p2i(obj->mark()),
1643                                     obj->klass()->external_name());
1644       }
1645     }
1646 
1647     // Restore the header back to obj
1648     obj->release_set_mark(mid->header());
1649     mid->clear();
1650 
1651     assert(mid->object() == NULL, "invariant");
1652 
1653     // Move the monitor to the working free list defined by freeHeadp, freeTailp
1654     if (*freeHeadp == NULL) *freeHeadp = mid;
1655     if (*freeTailp != NULL) {
1656       ObjectMonitor * prevtail = *freeTailp;


1702       cur_mid_in_use = mid;
1703       mid = mid->FreeNext;
1704     }
1705   }
1706   return deflated_count;
1707 }
1708 
1709 void ObjectSynchronizer::prepare_deflate_idle_monitors(DeflateMonitorCounters* counters) {
1710   counters->nInuse = 0;          // currently associated with objects
1711   counters->nInCirculation = 0;  // extant
1712   counters->nScavenged = 0;      // reclaimed
1713 }
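
An illustrative sketch, not part of this patch: the calling sequence a safepoint cleanup pass would use around these helpers. The finishing counterpart and the per-thread pass are outside the visible hunks, so their exact signatures are assumed here.

  DeflateMonitorCounters counters;
  ObjectSynchronizer::prepare_deflate_idle_monitors(&counters);   // zero the tallies
  ObjectSynchronizer::deflate_idle_monitors(&counters);           // global lists, under gListLock
  // ... then deflate_thread_local_monitors() per JavaThread, and a finishing
  // step that publishes counters.nScavenged / nInuse / nInCirculation ...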
1714 
1715 void ObjectSynchronizer::deflate_idle_monitors(DeflateMonitorCounters* counters) {
1716   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
1717   bool deflated = false;
1718 
1719   ObjectMonitor * freeHeadp = NULL;  // Local SLL of scavenged monitors
1720   ObjectMonitor * freeTailp = NULL;
1721 
1722   TEVENT(deflate_idle_monitors);
1723   // Prevent omFlush from changing mids in Thread dtor's during deflation
1724   // And in case the vm thread is acquiring a lock during a safepoint
1725   // See e.g. 6320749
1726   Thread::muxAcquire(&gListLock, "scavenge - return");
1727 
1728   if (MonitorInUseLists) {
1729     // Note: the thread-local monitors lists get deflated in
1730     // a separate pass. See deflate_thread_local_monitors().
1731 
1732     // For moribund threads, scan gOmInUseList
1733     if (gOmInUseList) {
1734       counters->nInCirculation += gOmInUseCount;
1735       int deflated_count = deflate_monitor_list((ObjectMonitor **)&gOmInUseList, &freeHeadp, &freeTailp);
1736       gOmInUseCount -= deflated_count;
1737       counters->nScavenged += deflated_count;
1738       counters->nInuse += gOmInUseCount;
1739     }
1740 
1741   } else {
1742     PaddedEnd<ObjectMonitor> * block = OrderAccess::load_acquire(&gBlockList);




 303         // This is a special case where the Java Monitor was inflated
 304         // after this thread entered the stack-lock recursively. When a
 305         // Java Monitor is inflated, we cannot safely walk the Java
 306         // Monitor owner's stack and update the BasicLocks because a
 307         // Java Monitor can be asynchronously inflated by a thread that
 308         // does not own the Java Monitor.
 309         ObjectMonitor * m = mark->monitor();
 310         assert(((oop)(m->object()))->mark() == mark, "invariant");
 311         assert(m->is_entered(THREAD), "invariant");
 312       }
 313     }
 314 #endif
 315     return;
 316   }
 317 
 318   if (mark == (markOop) lock) {
 319     // If the object is stack-locked by the current thread, try to
 320     // swing the displaced header from the BasicLock back to the mark.
 321     assert(dhw->is_neutral(), "invariant");
 322     if (object->cas_set_mark(dhw, mark) == mark) {

 323       return;
 324     }
 325   }
 326 
 327   // We have to take the slow-path of possible inflation and then exit.
 328   ObjectSynchronizer::inflate(THREAD,
 329                               object,
 330                               inflate_cause_vm_internal)->exit(true, THREAD);
 331 }
 332 
 333 // -----------------------------------------------------------------------------
 334 // Interpreter/Compiler Slow Case
 335 // This routine is used to handle the interpreter/compiler slow case.
 336 // We don't need to use the fast path here, because it must have
 337 // failed in the interpreter/compiler code.
 338 void ObjectSynchronizer::slow_enter(Handle obj, BasicLock* lock, TRAPS) {
 339   markOop mark = obj->mark();
 340   assert(!mark->has_bias_pattern(), "should not see bias pattern here");
 341 
 342   if (mark->is_neutral()) {
 343     // Anticipate successful CAS -- the ST of the displaced mark must
 344     // be visible <= the ST performed by the CAS.
 345     lock->set_displaced_header(mark);
 346     if (mark == obj()->cas_set_mark((markOop) lock, mark)) {

 347       return;
 348     }
 349     // Fall through to inflate() ...
 350   } else if (mark->has_locker() &&
 351              THREAD->is_lock_owned((address)mark->locker())) {
 352     assert(lock != mark->locker(), "must not re-lock the same lock");
 353     assert(lock != (BasicLock*)obj->mark(), "don't relock with same BasicLock");
 354     lock->set_displaced_header(NULL);
 355     return;
 356   }
 357 
 358   // The object header will never be displaced to this lock,
 359   // so it does not matter what the value is, except that it
 360   // must be non-zero to avoid looking like a re-entrant lock,
 361   // and must not look locked either.
 362   lock->set_displaced_header(markOopDesc::unused_mark());
 363   ObjectSynchronizer::inflate(THREAD,
 364                               obj(),
 365                               inflate_cause_monitor_enter)->enter(THREAD);
 366 }


 369 // We don't need to use the fast path here, because it must have
 370 // failed in the interpreter/compiler code. Simply using the heavy
 371 // weight monitor should be ok, unless someone finds otherwise.
 372 void ObjectSynchronizer::slow_exit(oop object, BasicLock* lock, TRAPS) {
 373   fast_exit(object, lock, THREAD);
 374 }
 375 
 376 // -----------------------------------------------------------------------------
 377 // Class Loader support to work around deadlocks on the class loader lock objects
 378 // Also used by GC
 379 // complete_exit()/reenter() are used to wait on a nested lock
 380 // i.e. to give up an outer lock completely and then re-enter
 381 // Used when holding nested locks - lock acquisition order: lock1 then lock2
 382 //  1) complete_exit lock1 - saving recursion count
 383 //  2) wait on lock2
 384 //  3) when notified on lock2, unlock lock2
 385 //  4) reenter lock1 with original recursion count
 386 //  5) lock lock2
 387 // NOTE: must use heavy weight monitor to handle complete_exit/reenter()
 388 intptr_t ObjectSynchronizer::complete_exit(Handle obj, TRAPS) {

 389   if (UseBiasedLocking) {
 390     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
 391     assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
 392   }
 393 
 394   ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD,
 395                                                        obj(),
 396                                                        inflate_cause_vm_internal);
 397 
 398   return monitor->complete_exit(THREAD);
 399 }
 400 
 401 // NOTE: must use heavy weight monitor to handle complete_exit/reenter()
 402 void ObjectSynchronizer::reenter(Handle obj, intptr_t recursion, TRAPS) {

 403   if (UseBiasedLocking) {
 404     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
 405     assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
 406   }
 407 
 408   ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD,
 409                                                        obj(),
 410                                                        inflate_cause_vm_internal);
 411 
 412   monitor->reenter(recursion, THREAD);
 413 }
 414 // -----------------------------------------------------------------------------
 415 // JNI locks on java objects
 416 // NOTE: must use heavy weight monitor to handle jni monitor enter
 417 void ObjectSynchronizer::jni_enter(Handle obj, TRAPS) {
 418   // the current locking is from JNI instead of Java code

 419   if (UseBiasedLocking) {
 420     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
 421     assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
 422   }
 423   THREAD->set_current_pending_monitor_is_from_java(false);
 424   ObjectSynchronizer::inflate(THREAD, obj(), inflate_cause_jni_enter)->enter(THREAD);
 425   THREAD->set_current_pending_monitor_is_from_java(true);
 426 }
 427 
 428 // NOTE: must use heavy weight monitor to handle jni monitor exit
 429 void ObjectSynchronizer::jni_exit(oop obj, Thread* THREAD) {

 430   if (UseBiasedLocking) {
 431     Handle h_obj(THREAD, obj);
 432     BiasedLocking::revoke_and_rebias(h_obj, false, THREAD);
 433     obj = h_obj();
 434   }
 435   assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
 436 
 437   ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD,
 438                                                        obj,
 439                                                        inflate_cause_jni_exit);
 440   // If this thread has locked the object, exit the monitor.  Note:  can't use
 441   // monitor->check(CHECK); must exit even if an exception is pending.
 442   if (monitor->check(THREAD)) {
 443     monitor->exit(true, THREAD);
 444   }
 445 }
 446 
 447 // -----------------------------------------------------------------------------
 448 // Internal VM locks on java objects
 449 // standard constructor, allows locking failures
 450 ObjectLocker::ObjectLocker(Handle obj, Thread* thread, bool doLock) {
 451   _dolock = doLock;
 452   _thread = thread;
 453   debug_only(if (StrictSafepointChecks) _thread->check_for_valid_safepoint_state(false);)
 454   _obj = obj;
 455 
 456   if (_dolock) {


 457     ObjectSynchronizer::fast_enter(_obj, &_lock, false, _thread);
 458   }
 459 }
 460 
 461 ObjectLocker::~ObjectLocker() {
 462   if (_dolock) {
 463     ObjectSynchronizer::fast_exit(_obj(), &_lock, _thread);
 464   }
 465 }
 466 
 467 
 468 // -----------------------------------------------------------------------------
 469 //  Wait/Notify/NotifyAll
 470 // NOTE: must use heavy weight monitor to handle wait()
 471 int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
 472   if (UseBiasedLocking) {
 473     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
 474     assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
 475   }
 476   if (millis < 0) {

 477     THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
 478   }
 479   ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD,
 480                                                        obj(),
 481                                                        inflate_cause_wait);
 482 
 483   DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), THREAD, millis);
 484   monitor->wait(millis, true, THREAD);
 485 
 486   // This dummy call is in place to get around dtrace bug 6254741.  Once
 487   // that's fixed we can uncomment the following line, remove the call
 488   // and change this function back into a "void" func.
 489   // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
 490   return dtrace_waited_probe(monitor, obj, THREAD);
 491 }
 492 
 493 void ObjectSynchronizer::waitUninterruptibly(Handle obj, jlong millis, TRAPS) {
 494   if (UseBiasedLocking) {
 495     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
 496     assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
 497   }
 498   if (millis < 0) {

 499     THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
 500   }
 501   ObjectSynchronizer::inflate(THREAD,
 502                               obj(),
 503                               inflate_cause_wait)->wait(millis, false, THREAD);
 504 }
 505 
 506 void ObjectSynchronizer::notify(Handle obj, TRAPS) {
 507   if (UseBiasedLocking) {
 508     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
 509     assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
 510   }
 511 
 512   markOop mark = obj->mark();
 513   if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
 514     return;
 515   }
 516   ObjectSynchronizer::inflate(THREAD,
 517                               obj(),
 518                               inflate_cause_notify)->notify(THREAD);


 581   for (;;) {
 582     markOop mark = obj->mark();
 583     if (!mark->is_being_inflated()) {
 584       return mark;    // normal fast-path return
 585     }
 586 
 587     // The object is being inflated by some other thread.
 588     // The caller of ReadStableMark() must wait for inflation to complete.
 589     // Avoid live-lock
 590     // TODO: consider calling SafepointSynchronize::do_call_back() while
 591     // spinning to see if there's a safepoint pending.  If so, immediately
 592     // yielding or blocking would be appropriate.  Avoid spinning while
 593     // there is a safepoint pending.
 594     // TODO: add inflation contention performance counters.
 595     // TODO: restrict the aggregate number of spinners.
 596 
 597     ++its;
 598     if (its > 10000 || !os::is_MP()) {
 599       if (its & 1) {
 600         os::naked_yield();

 601       } else {
 602         // Note that the following code attenuates the livelock problem but is not
 603         // a complete remedy.  A more complete solution would require that the inflating
 604         // thread hold the associated inflation lock.  The following code simply restricts
 605         // the number of spinners to at most one.  We'll have N-2 threads blocked
 606         // on the inflation lock, 1 thread holding the inflation lock and using
 607         // a yield/park strategy, and 1 thread in the midst of inflation.
 608         // A more refined approach would be to change the encoding of INFLATING
 609         // to allow encapsulation of a native thread pointer.  Threads waiting for
 610         // inflation to complete would use CAS to push themselves onto a singly linked
 611         // list rooted at the markword.  Once enqueued, they'd loop, checking a per-thread flag
 612         // and calling park().  When inflation was complete the thread that accomplished inflation
 613         // would detach the list and set the markword to inflated with a single CAS and
 614         // then for each thread on the list, set the flag and unpark() the thread.
 615         // This is conceptually similar to muxAcquire-muxRelease, except that muxRelease
 616         // wakes at most one thread whereas we need to wake the entire list.
 617         int ix = (cast_from_oop<intptr_t>(obj) >> 5) & (NINFLATIONLOCKS-1);
 618         int YieldThenBlock = 0;
 619         assert(ix >= 0 && ix < NINFLATIONLOCKS, "invariant");
 620         assert((NINFLATIONLOCKS & (NINFLATIONLOCKS-1)) == 0, "invariant");
 621         Thread::muxAcquire(gInflationLocks + ix, "gInflationLock");
 622         while (obj->mark() == markOopDesc::INFLATING()) {
 623           // Beware: os::naked_yield() is advisory and has almost no effect on some platforms
 624           // so we periodically call Self->_ParkEvent->park(1).
 625           // We use a mixed spin/yield/block mechanism.
 626           if ((YieldThenBlock++) >= 16) {
 627             Thread::current()->_ParkEvent->park(1);
 628           } else {
 629             os::naked_yield();
 630           }
 631         }
 632         Thread::muxRelease(gInflationLocks + ix);

 633       }
 634     } else {
 635       SpinPause();       // SMP-polite spinning
 636     }
 637   }
 638 }
 639 
 640 // hashCode() generation :
 641 //
 642 // Possibilities:
 643 // * MD5Digest of {obj,stwRandom}
 644 // * CRC32 of {obj,stwRandom} or any linear-feedback shift register function.
 645 // * A DES- or AES-style SBox[] mechanism
 646 // * One of the Phi-based schemes, such as:
 647 //   2654435761 = 2^32 * Phi (golden ratio)
 648 //   HashCodeValue = ((uintptr_t(obj) >> 3) * 2654435761) ^ GVars.stwRandom ;
 649 // * A variation of Marsaglia's shift-xor RNG scheme.
 650 // * (obj ^ stwRandom) is appealing, but can result
 651 //   in undesirable regularity in the hashCode values of adjacent objects
 652 //   (objects allocated back-to-back, in particular).  This could potentially


 674   } else if (hashCode == 4) {
 675     value = cast_from_oop<intptr_t>(obj);
 676   } else {
 677     // Marsaglia's xor-shift scheme with thread-specific state
 678     // This is probably the best overall implementation -- we'll
 679     // likely make this the default in future releases.
 680     unsigned t = Self->_hashStateX;
 681     t ^= (t << 11);
 682     Self->_hashStateX = Self->_hashStateY;
 683     Self->_hashStateY = Self->_hashStateZ;
 684     Self->_hashStateZ = Self->_hashStateW;
 685     unsigned v = Self->_hashStateW;
 686     v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));
 687     Self->_hashStateW = v;
 688     value = v;
 689   }
 690 
 691   value &= markOopDesc::hash_mask;
 692   if (value == 0) value = 0xBAD;
 693   assert(value != markOopDesc::no_hash, "invariant");

 694   return value;
 695 }
 696 
 697 intptr_t ObjectSynchronizer::FastHashCode(Thread * Self, oop obj) {
 698   if (UseBiasedLocking) {
 699     // NOTE: many places throughout the JVM do not expect a safepoint
 700     // to be taken here, in particular most operations on perm gen
 701     // objects. However, we only ever bias Java instances and all of
 702     // the call sites of identity_hash that might revoke biases have
 703     // been checked to make sure they can handle a safepoint. The
 704     // added check of the bias pattern is to avoid useless calls to
 705     // thread-local storage.
 706     if (obj->mark()->has_bias_pattern()) {
 707       // Handle for oop obj in case of STW safepoint
 708       Handle hobj(Self, obj);
 709       // Relaxing assertion for bug 6320749.
 710       assert(Universe::verify_in_progress() ||
 711              !SafepointSynchronize::is_at_safepoint(),
 712              "biases should not be seen by VM thread here");
 713       BiasedLocking::revoke_and_rebias(hobj, false, JavaThread::current());


1124     // If the muxTry() fails then drop immediately into case 3.
1125     // If we're using thread-local free lists then try
1126     // to reprovision the caller's free list.
1127     if (gFreeList != NULL) {
1128       // Reprovision the thread's omFreeList.
1129       // Use bulk transfers to reduce the allocation rate and heat
1130       // on various locks.
1131       Thread::muxAcquire(&gListLock, "omAlloc");
1132       for (int i = Self->omFreeProvision; --i >= 0 && gFreeList != NULL;) {
1133         gMonitorFreeCount--;
1134         ObjectMonitor * take = gFreeList;
1135         gFreeList = take->FreeNext;
1136         guarantee(take->object() == NULL, "invariant");
1137         guarantee(!take->is_busy(), "invariant");
1138         take->Recycle();
1139         omRelease(Self, take, false);
1140       }
1141       Thread::muxRelease(&gListLock);
1142       Self->omFreeProvision += 1 + (Self->omFreeProvision/2);
1143       if (Self->omFreeProvision > MAXPRIVATE) Self->omFreeProvision = MAXPRIVATE;

1144 
1145       const int mx = MonitorBound;
1146       if (mx > 0 && (gMonitorPopulation-gMonitorFreeCount) > mx) {
1147         // We can't safely induce a STW safepoint from omAlloc() as our thread
1148         // state may not be appropriate for such activities and callers may hold
1149         // naked oops, so instead we defer the action.
1150         InduceScavenge(Self, "omAlloc");
1151       }
1152       continue;
1153     }
1154 
1155     // 3: allocate a block of new ObjectMonitors
1156     // Both the local and global free lists are empty -- resort to malloc().
1157     // In the current implementation objectMonitors are TSM - immortal.
1158     // Ideally, we'd write "new ObjectMonitor[_BLOCKSIZE]", but we want
1159     // each ObjectMonitor to start at the beginning of a cache line,
1160     // so we use align_up().
1161     // A better solution would be to use C++ placement-new.
1162     // BEWARE: As it stands currently, we don't run the ctors!
1163     assert(_BLOCKSIZE > 1, "invariant");


1201     // list activity.
1202 
1203     // Acquire the gListLock to manipulate gBlockList and gFreeList.
1204     // An Oyama-Taura-Yonezawa scheme might be more efficient.
1205     Thread::muxAcquire(&gListLock, "omAlloc [2]");
1206     gMonitorPopulation += _BLOCKSIZE-1;
1207     gMonitorFreeCount += _BLOCKSIZE-1;
1208 
1209     // Add the new block to the list of extant blocks (gBlockList).
1210     // The very first objectMonitor in a block is reserved and dedicated.
1211     // It serves as blocklist "next" linkage.
1212     temp[0].FreeNext = gBlockList;
1213     // There are lock-free uses of gBlockList so make sure that
1214     // the previous stores happen before we update gBlockList.
1215     OrderAccess::release_store(&gBlockList, temp);
1216 
1217     // Add the new string of objectMonitors to the global free list
1218     temp[_BLOCKSIZE - 1].FreeNext = gFreeList;
1219     gFreeList = temp + 1;
1220     Thread::muxRelease(&gListLock);

1221   }
1222 }
1223 
1224 // Place "m" on the caller's private per-thread omFreeList.
1225 // In practice there's no need to clamp or limit the number of
1226 // monitors on a thread's omFreeList as the only time we'll call
1227 // omRelease is to return a monitor to the free list after a CAS
1228 // attempt failed.  This doesn't allow unbounded #s of monitors to
1229 // accumulate on a thread's free list.
1230 //
1231 // Key constraint: all ObjectMonitors on a thread's free list and the global
1232 // free list must have their object field set to null. This prevents the
1233 // scavenger -- deflate_idle_monitors -- from reclaiming them.
1234 
1235 void ObjectSynchronizer::omRelease(Thread * Self, ObjectMonitor * m,
1236                                    bool fromPerThreadAlloc) {
1237   guarantee(m->object() == NULL, "invariant");
1238   guarantee(((m->is_busy()|m->_recursions) == 0), "freeing in-use monitor");
1239   // Remove from omInUseList
1240   if (MonitorInUseLists && fromPerThreadAlloc) {


1285 // ObjectSynchronizer::oops_do() (if it happens after omFlush() and the thread's
1286 // monitors have been transferred to the global in-use list).
1287 
1288 void ObjectSynchronizer::omFlush(Thread * Self) {
1289   ObjectMonitor * list = Self->omFreeList;  // Null-terminated SLL
1290   Self->omFreeList = NULL;
1291   ObjectMonitor * tail = NULL;
1292   int tally = 0;
1293   if (list != NULL) {
1294     ObjectMonitor * s;
1295     // The thread is going away, the per-thread free monitors
1296     // are freed via set_owner(NULL)
1297     // Link them to tail, which will be linked into the global free list
1298     // gFreeList below, under the gListLock
1299     for (s = list; s != NULL; s = s->FreeNext) {
1300       tally++;
1301       tail = s;
1302       guarantee(s->object() == NULL, "invariant");
1303       guarantee(!s->is_busy(), "invariant");
1304       s->set_owner(NULL);   // redundant but good hygiene

1305     }
1306     guarantee(tail != NULL && list != NULL, "invariant");
1307   }
1308 
1309   ObjectMonitor * inUseList = Self->omInUseList;
1310   ObjectMonitor * inUseTail = NULL;
1311   int inUseTally = 0;
1312   if (inUseList != NULL) {
1313     Self->omInUseList = NULL;
1314     ObjectMonitor *cur_om;
1315     // The thread is going away, however the omInUseList inflated
1316     // monitors may still be in-use by other threads.
1317     // Link them to inUseTail, which will be linked into the global in-use list
1318     // gOmInUseList below, under the gListLock
1319     for (cur_om = inUseList; cur_om != NULL; cur_om = cur_om->FreeNext) {
1320       inUseTail = cur_om;
1321       inUseTally++;
1322     }
1323     assert(Self->omInUseCount == inUseTally, "in-use count off");
1324     Self->omInUseCount = 0;
1325     guarantee(inUseTail != NULL && inUseList != NULL, "invariant");
1326   }
1327 
1328   Thread::muxAcquire(&gListLock, "omFlush");
1329   if (tail != NULL) {
1330     tail->FreeNext = gFreeList;
1331     gFreeList = list;
1332     gMonitorFreeCount += tally;
1333     assert(Self->omFreeCount == tally, "free-count off");
1334     Self->omFreeCount = 0;
1335   }
1336 
1337   if (inUseTail != NULL) {
1338     inUseTail->FreeNext = gOmInUseList;
1339     gOmInUseList = inUseList;
1340     gOmInUseCount += inUseTally;
1341   }
1342 
1343   Thread::muxRelease(&gListLock);

1344 }
1345 
1346 static void post_monitor_inflate_event(EventJavaMonitorInflate* event,
1347                                        const oop obj,
1348                                        ObjectSynchronizer::InflateCause cause) {
1349   assert(event != NULL, "invariant");
1350   assert(event->should_commit(), "invariant");
1351   event->set_monitorClass(obj->klass());
1352   event->set_address((uintptr_t)(void*)obj);
1353   event->set_cause((u1)cause);
1354   event->commit();
1355 }
1356 
1357 // Fast path code shared by multiple functions
1358 ObjectMonitor* ObjectSynchronizer::inflate_helper(oop obj) {
1359   markOop mark = obj->mark();
1360   if (mark->has_monitor()) {
1361     assert(ObjectSynchronizer::verify_objmon_isinpool(mark->monitor()), "monitor is invalid");
1362     assert(mark->monitor()->header()->is_neutral(), "monitor must record a good object header");
1363     return mark->monitor();


1388     // *  INFLATING    - busy wait for conversion to complete
1389     // *  Neutral      - aggressively inflate the object.
1390     // *  BIASED       - Illegal.  We should never see this
1391 
1392     // CASE: inflated
1393     if (mark->has_monitor()) {
1394       ObjectMonitor * inf = mark->monitor();
1395       assert(inf->header()->is_neutral(), "invariant");
1396       assert(oopDesc::equals((oop) inf->object(), object), "invariant");
1397       assert(ObjectSynchronizer::verify_objmon_isinpool(inf), "monitor is invalid");
1398       return inf;
1399     }
1400 
1401     // CASE: inflation in progress - inflating over a stack-lock.
1402     // Some other thread is converting from stack-locked to inflated.
1403     // Only that thread can complete inflation -- other threads must wait.
1404     // The INFLATING value is transient.
1405     // Currently, we spin/yield/park and poll the markword, waiting for inflation to finish.
1406     // We could always eliminate polling by parking the thread on some auxiliary list.
1407     if (mark == markOopDesc::INFLATING()) {

1408       ReadStableMark(object);
1409       continue;
1410     }
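ReadStableMark(), called above, implements the busy wait the comment describes: keep re-reading the mark word until it no longer holds the transient INFLATING value. A minimal standalone sketch of that polling pattern, with an illustrative sentinel and a std::atomic word standing in for the mark word (the real code spins, yields and parks adaptively instead of yielding on every iteration):

    #include <atomic>
    #include <cstdint>
    #include <thread>

    // Illustrative stand-in for markOopDesc::INFLATING(), which is the value 0.
    static const std::intptr_t kInflatingSentinel = 0;

    std::intptr_t wait_for_stable_word(const std::atomic<std::intptr_t>& word) {
      std::intptr_t v = word.load(std::memory_order_acquire);
      while (v == kInflatingSentinel) {
        std::this_thread::yield();               // back off between polls
        v = word.load(std::memory_order_acquire);
      }
      return v;                                  // a stable, non-transient value
    }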
1411 
1412     // CASE: stack-locked
1413     // Could be stack-locked either by this thread or by some other thread.
1414     //
1415     // Note that we allocate the objectmonitor speculatively, _before_ attempting
1416     // to install INFLATING into the mark word.  We originally installed INFLATING,
1417     // allocated the objectmonitor, and then finally STed the address of the
1418     // objectmonitor into the mark.  This was correct, but artificially lengthened
1419     // the interval in which INFLATED appeared in the mark, thus increasing
1420     // the odds of inflation contention.
1421     //
1422     // We now use per-thread private objectmonitor free lists.
1423     // These lists are reprovisioned from the global free list outside the
1424     // critical INFLATING...ST interval.  A thread can transfer
1425     // multiple objectmonitors en masse from the global free list to its local free list.
1426     // This reduces coherency traffic and lock contention on the global free list.
1427     // Using such local free lists, it doesn't matter if the omAlloc() call appears


1480       // Setup monitor fields to proper values -- prepare the monitor
1481       m->set_header(dmw);
1482 
1483       // Optimization: if the mark->locker stack address is associated
1484       // with this thread we could simply set m->_owner = Self.
1485       // Note that a thread can inflate an object
1486       // that it has stack-locked -- as might happen in wait() -- directly
1487       // with CAS.  That is, we can avoid the xchg-NULL .... ST idiom.
1488       m->set_owner(mark->locker());
1489       m->set_object(object);
1490       // TODO-FIXME: assert BasicLock->dhw != 0.
1491 
1492       // Must preserve store ordering. The monitor state must
1493       // be stable at the time of publishing the monitor address.
1494       guarantee(object->mark() == markOopDesc::INFLATING(), "invariant");
1495       object->release_set_mark(markOopDesc::encode(m));
1496 
1497       // Hopefully the performance counters are allocated on distinct cache lines
1498       // to avoid false sharing on MP systems ...
1499       OM_PERFDATA_OP(Inflations, inc());

1500       if (log_is_enabled(Debug, monitorinflation)) {
1501         if (object->is_instance()) {
1502           ResourceMark rm;
1503           log_debug(monitorinflation)("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
1504                                       p2i(object), p2i(object->mark()),
1505                                       object->klass()->external_name());
1506         }
1507       }
1508       if (event.should_commit()) {
1509         post_monitor_inflate_event(&event, object, cause);
1510       }
1511       return m;
1512     }
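The release_set_mark() call above is the publication step: every monitor field is initialized first with plain stores, and only then is the monitor address stored into the mark word with release ordering, so any thread that later reads the mark with acquire semantics also observes the initialized fields. A minimal standalone sketch of that publish/consume pairing, with illustrative names (Monitor, g_published) in place of the HotSpot types:

    #include <atomic>

    struct Monitor {
      void* header = nullptr;
      void* owner  = nullptr;
      void* object = nullptr;
    };

    std::atomic<Monitor*> g_published{nullptr};  // stands in for the object's mark word

    void publish(Monitor* m, void* hdr, void* owner, void* obj) {
      m->header = hdr;      // plain stores: prepare the monitor first ...
      m->owner  = owner;
      m->object = obj;
      // ... then publish its address with release ordering.
      g_published.store(m, std::memory_order_release);
    }

    Monitor* consume() {
      // If this acquire load returns a non-null pointer, the fields written
      // before the release store above are guaranteed to be visible.
      return g_published.load(std::memory_order_acquire);
    }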
1513 
1514     // CASE: neutral
1515     // TODO-FIXME: for entry we currently inflate and then try to CAS _owner.
1516     // If we know we're inflating for entry it's better to inflate by swinging a
1517     // pre-locked objectMonitor pointer into the object header.   A successful
1518     // CAS inflates the object *and* confers ownership to the inflating thread.
1519     // In the current implementation we use a 2-step mechanism where we CAS()


1530     m->set_object(object);
1531     m->_recursions   = 0;
1532     m->_Responsible  = NULL;
1533     m->_SpinDuration = ObjectMonitor::Knob_SpinLimit;       // consider: keep metastats by type/class
1534 
1535     if (object->cas_set_mark(markOopDesc::encode(m), mark) != mark) {
1536       m->set_object(NULL);
1537       m->set_owner(NULL);
1538       m->Recycle();
1539       omRelease(Self, m, true);
1540       m = NULL;
1541       continue;
1542       // interference - the markword changed - just retry.
1543       // The state-transitions are one-way, so there's no chance of
1544       // live-lock -- "Inflated" is an absorbing state.
1545     }
1546 
1547     // Hopefully the performance counters are allocated on distinct
1548     // cache lines to avoid false sharing on MP systems ...
1549     OM_PERFDATA_OP(Inflations, inc());

1550     if (log_is_enabled(Debug, monitorinflation)) {
1551       if (object->is_instance()) {
1552         ResourceMark rm;
1553         log_debug(monitorinflation)("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
1554                                     p2i(object), p2i(object->mark()),
1555                                     object->klass()->external_name());
1556       }
1557     }
1558     if (event.should_commit()) {
1559       post_monitor_inflate_event(&event, object, cause);
1560     }
1561     return m;
1562   }
1563 }
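The neutral case above prepares the monitor speculatively, installs it with a single CAS on the mark word, and on interference undoes the preparation, recycles the monitor, and goes back to re-examine the freshly read mark (the continue statement). A minimal standalone sketch of that try-install-or-roll-back step, with illustrative names and a std::atomic word standing in for the mark word:

    #include <atomic>
    #include <cstdint>

    struct Mon { std::intptr_t saved_header = 0; };

    // Returns true if 'encoded_monitor' was installed over 'expected_neutral';
    // on failure the monitor is rolled back and the caller re-dispatches on the
    // new header value, as the outer loop of inflate() does.
    bool try_install_monitor(std::atomic<std::intptr_t>& header_word,
                             std::intptr_t expected_neutral,
                             std::intptr_t encoded_monitor,
                             Mon* m) {
      m->saved_header = expected_neutral;                    // speculative preparation
      if (header_word.compare_exchange_strong(expected_neutral, encoded_monitor)) {
        return true;                                         // the object is now inflated
      }
      m->saved_header = 0;                                   // interference: roll back
      return false;
    }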
1564 
1565 
1566 // Deflate_idle_monitors() is called at all safepoints, immediately
1567 // after all mutators are stopped, but before any objects have moved.
1568 // It traverses the list of known monitors, deflating where possible.
1569 // The scavenged monitors are returned to the monitor free list.


1596 };
1597 
1598 // Deflate a single monitor if not in use
1599 // Return true if deflated, false if in use
1600 bool ObjectSynchronizer::deflate_monitor(ObjectMonitor* mid, oop obj,
1601                                          ObjectMonitor** freeHeadp,
1602                                          ObjectMonitor** freeTailp) {
1603   bool deflated;
1604   // Normal case ... The monitor is associated with obj.
1605   guarantee(obj->mark() == markOopDesc::encode(mid), "invariant");
1606   guarantee(mid == obj->mark()->monitor(), "invariant");
1607   guarantee(mid->header()->is_neutral(), "invariant");
1608 
1609   if (mid->is_busy()) {
1610     if (ClearResponsibleAtSTW) mid->_Responsible = NULL;
1611     deflated = false;
1612   } else {
1613     // Deflate the monitor if it is no longer being used
1614     // It's idle - scavenge and return to the global free list
1615     // plain old deflation ...

1616     if (log_is_enabled(Debug, monitorinflation)) {
1617       if (obj->is_instance()) {
1618         ResourceMark rm;
1619         log_debug(monitorinflation)("Deflating object " INTPTR_FORMAT " , "
1620                                     "mark " INTPTR_FORMAT " , type %s",
1621                                     p2i(obj), p2i(obj->mark()),
1622                                     obj->klass()->external_name());
1623       }
1624     }
1625 
1626     // Restore the header back to obj
1627     obj->release_set_mark(mid->header());
1628     mid->clear();
1629 
1630     assert(mid->object() == NULL, "invariant");
1631 
1632     // Move the deflated ObjectMonitor to the working free list defined by freeHeadp and freeTailp
1633     if (*freeHeadp == NULL) *freeHeadp = mid;
1634     if (*freeTailp != NULL) {
1635       ObjectMonitor * prevtail = *freeTailp;


1681       cur_mid_in_use = mid;
1682       mid = mid->FreeNext;
1683     }
1684   }
1685   return deflated_count;
1686 }
1687 
1688 void ObjectSynchronizer::prepare_deflate_idle_monitors(DeflateMonitorCounters* counters) {
1689   counters->nInuse = 0;          // currently associated with objects
1690   counters->nInCirculation = 0;  // extant
1691   counters->nScavenged = 0;      // reclaimed
1692 }
1693 
1694 void ObjectSynchronizer::deflate_idle_monitors(DeflateMonitorCounters* counters) {
1695   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
1696   bool deflated = false;
1697 
1698   ObjectMonitor * freeHeadp = NULL;  // Local SLL of scavenged monitors
1699   ObjectMonitor * freeTailp = NULL;
1700 

1701   // Prevent omFlush from changing mids in Thread dtors during deflation,
1702   // and in case the VM thread is acquiring a lock during a safepoint.
1703   // See e.g. 6320749
1704   Thread::muxAcquire(&gListLock, "scavenge - return");
1705 
1706   if (MonitorInUseLists) {
1707     // Note: the thread-local monitors lists get deflated in
1708     // a separate pass. See deflate_thread_local_monitors().
1709 
1710     // For moribund threads, scan gOmInUseList
1711     if (gOmInUseList) {
1712       counters->nInCirculation += gOmInUseCount;
1713       int deflated_count = deflate_monitor_list((ObjectMonitor **)&gOmInUseList, &freeHeadp, &freeTailp);
1714       gOmInUseCount -= deflated_count;
1715       counters->nScavenged += deflated_count;
1716       counters->nInuse += gOmInUseCount;
1717     }
1718 
1719   } else {
1720     PaddedEnd<ObjectMonitor> * block = OrderAccess::load_acquire(&gBlockList);

