
src/hotspot/share/runtime/synchronizer.cpp

rev 54612 : Checkpoint latest preliminary review patches for full OpenJDK review; merge with 8222295.patch.
rev 54613 : imported patch dcubed.monitor_deflate_conc.v2.01
rev 54614 : imported patch dcubed.monitor_deflate_conc.v2.02
rev 54615 : imported patch dcubed.monitor_deflate_conc.v2.03


 333     // If the object is stack-locked by the current thread, try to
 334     // swing the displaced header from the BasicLock back to the mark.
 335     assert(dhw->is_neutral(), "invariant");
 336     if (object->cas_set_mark(dhw, mark) == mark) {
 337       return;
 338     }
 339   }
 340 
 341   // We have to take the slow-path of possible inflation and then exit.
 342   ObjectMonitorHandle omh;
 343   inflate(&omh, THREAD, object, inflate_cause_vm_internal);
 344   omh.om_ptr()->exit(true, THREAD);
 345 }
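For readers outside HotSpot, the displaced-header swing above can be modeled with std::atomic standing in for the object's mark word. This is an illustrative sketch under that assumption, not the VM's markOop machinery; dhw and locked_mark mirror the names used in the code above.

    #include <atomic>
    #include <cstdint>

    // dhw is the neutral header that was displaced into the BasicLock when
    // the object was stack-locked; locked_mark is the current mark, which
    // points at that BasicLock. The unlock succeeds only if the mark is
    // still unchanged, exactly as in the cas_set_mark() call above.
    static bool try_stack_unlock(std::atomic<uintptr_t>& mark_word,
                                 uintptr_t dhw, uintptr_t locked_mark) {
      return mark_word.compare_exchange_strong(locked_mark, dhw);
    }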
 346 
 347 // -----------------------------------------------------------------------------
 348 // Interpreter/Compiler Slow Case
 349 // This routine is used to handle the interpreter/compiler slow case.
 350 // We don't need to use the fast path here, because it must have
 351 // already failed in the interpreter/compiler code.
 352 void ObjectSynchronizer::slow_enter(Handle obj, BasicLock* lock, TRAPS) {
 353   bool do_loop = true;
 354   while (do_loop) {
 355     markOop mark = obj->mark();
 356     assert(!mark->has_bias_pattern(), "should not see bias pattern here");
 357 
 358     if (mark->is_neutral()) {
 359       // Anticipate successful CAS -- the ST of the displaced mark must
 360       // be visible <= the ST performed by the CAS.
 361       lock->set_displaced_header(mark);
 362       if (mark == obj()->cas_set_mark((markOop) lock, mark)) {
 363         return;
 364       }
 365       // Fall through to inflate() ...
 366     } else if (mark->has_locker() &&
 367                THREAD->is_lock_owned((address)mark->locker())) {
 368       assert(lock != mark->locker(), "must not re-lock the same lock");
 369       assert(lock != (BasicLock*)obj->mark(), "don't relock with same BasicLock");
 370       lock->set_displaced_header(NULL);
 371       return;
 372     }
 373 
 374     // The object header will never be displaced to this lock,
 375     // so it does not matter what the value is, except that it
 376     // must be non-zero to avoid looking like a re-entrant lock,
 377     // and must not look locked either.
 378     lock->set_displaced_header(markOopDesc::unused_mark());
 379     ObjectMonitorHandle omh;
 380     inflate(&omh, THREAD, obj(), inflate_cause_monitor_enter);
 381     do_loop = !omh.om_ptr()->enter(THREAD);
 382   }
 383 }
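The neutral-mark branch above orders two stores: the displaced header is published in the BasicLock before the CAS that installs the lock address (the "visible <=" comment). A minimal sketch with std::atomic; ToyBasicLock is a hypothetical stand-in and the real markOop bit encoding is elided:

    #include <atomic>
    #include <cstdint>

    struct ToyBasicLock { uintptr_t displaced_header; };

    static bool try_stack_lock(std::atomic<uintptr_t>& mark_word,
                               ToyBasicLock* lock) {
      uintptr_t mark = mark_word.load();
      // ST the displaced mark first so it is visible no later than the
      // ST performed by the CAS below.
      lock->displaced_header = mark;
      return mark_word.compare_exchange_strong(mark, (uintptr_t)lock);
    }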
 384 
 385 // This routine is used to handle the interpreter/compiler slow case.
 386 // We don't need to use the fast path here, because it must have
 387 // failed in the interpreter/compiler code. Simply using the heavy
 388 // weight monitor should be ok, unless someone finds otherwise.
 389 void ObjectSynchronizer::slow_exit(oop object, BasicLock* lock, TRAPS) {
 390   fast_exit(object, lock, THREAD);
 391 }
 392 
 393 // -----------------------------------------------------------------------------
 394 // Class Loader support to work around deadlocks on the class loader lock objects.
 395 // Also used by GC.
 396 // complete_exit()/reenter() are used to wait on a nested lock,
 397 // i.e. to give up an outer lock completely and then re-enter.
 398 // Used when holding nested locks - lock acquisition order: lock1 then lock2
 399 //  1) complete_exit lock1 - saving recursion count
 400 //  2) wait on lock2
 401 //  3) when notified on lock2, unlock lock2
 402 //  4) reenter lock1 with original recursion count
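The four steps above can be illustrated with a toy recursive monitor. SimpleMonitor and its methods are hypothetical stand-ins, not the VM's ObjectMonitor API, and the real wait/notify on lock2 is elided for brevity:

    #include <mutex>

    struct SimpleMonitor {                 // hypothetical stand-in
      std::recursive_mutex m;
      int depth = 0;                       // nesting depth of the owner

      void enter() { m.lock(); ++depth; }
      void exit()  { --depth; m.unlock(); }

      int complete_exit() {                // 1) give up the lock entirely,
        int saved = depth;                 //    remembering the count
        for (int i = 0; i < saved; i++) { --depth; m.unlock(); }
        return saved;
      }
      void reenter(int saved) {            // 4) restore the original count
        for (int i = 0; i < saved; i++) { m.lock(); ++depth; }
      }
    };

    static void wait_on_lock2(SimpleMonitor& lock1, SimpleMonitor& lock2) {
      int rec = lock1.complete_exit();     // 1) save recursion count
      lock2.enter();                       // 2) "wait" on lock2
      lock2.exit();                        // 3) unlock lock2 when notified
      lock1.reenter(rec);                  // 4) reenter lock1 as before
    }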


 404 // NOTE: must use heavy weight monitor to handle complete_exit/reenter()
 405 intptr_t ObjectSynchronizer::complete_exit(Handle obj, TRAPS) {
 406   if (UseBiasedLocking) {
 407     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
 408     assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
 409   }
 410 
 411   ObjectMonitorHandle omh;
 412   inflate(&omh, THREAD, obj(), inflate_cause_vm_internal);
 413   intptr_t ret_code = omh.om_ptr()->complete_exit(THREAD);
 414   return ret_code;
 415 }
 416 
 417 // NOTE: must use heavy weight monitor to handle complete_exit/reenter()
 418 void ObjectSynchronizer::reenter(Handle obj, intptr_t recursion, TRAPS) {
 419   if (UseBiasedLocking) {
 420     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
 421     assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
 422   }
 423 
 424   bool do_loop = true;
 425   while (do_loop) {
 426     ObjectMonitorHandle omh;
 427     inflate(&omh, THREAD, obj(), inflate_cause_vm_internal);
 428     do_loop = !omh.om_ptr()->reenter(recursion, THREAD);
 429   }
 430 }
 431 // -----------------------------------------------------------------------------
 432 // JNI locks on java objects
 433 // NOTE: must use heavy weight monitor to handle jni monitor enter
 434 void ObjectSynchronizer::jni_enter(Handle obj, TRAPS) {
 435   // the current locking is from JNI instead of Java code
 436   if (UseBiasedLocking) {
 437     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
 438     assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
 439   }
 440   THREAD->set_current_pending_monitor_is_from_java(false);
 441   bool do_loop = true;
 442   while (do_loop) {
 443     ObjectMonitorHandle omh;
 444     inflate(&omh, THREAD, obj(), inflate_cause_jni_enter);
 445     do_loop = !omh.om_ptr()->enter(THREAD);
 446   }
 447   THREAD->set_current_pending_monitor_is_from_java(true);
 448 }
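slow_enter(), reenter() and jni_enter() above all share the same do_loop idiom: with AsyncDeflateIdleMonitors, ObjectMonitor::enter()/reenter() return false when they lose a race with async deflation, and the caller re-inflates and retries. A self-contained toy of that control flow; enter_once() is a hypothetical stand-in:

    #include <atomic>

    static std::atomic<int> deflations_left{2};  // pretend two lost races

    static bool enter_once() {       // stand-in for ObjectMonitor::enter()
      return deflations_left.fetch_sub(1) <= 0;  // false => must retry
    }

    static void enter_with_retry() {
      bool do_loop = true;
      while (do_loop) {
        // In the VM, each iteration re-inflates and gets a fresh
        // ObjectMonitor via an ObjectMonitorHandle.
        do_loop = !enter_once();
      }
    }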
 449 
 450 // NOTE: must use heavy weight monitor to handle jni monitor exit
 451 void ObjectSynchronizer::jni_exit(oop obj, Thread* THREAD) {
 452   if (UseBiasedLocking) {
 453     Handle h_obj(THREAD, obj);
 454     BiasedLocking::revoke_and_rebias(h_obj, false, THREAD);
 455     obj = h_obj();
 456   }
 457   assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
 458 
 459   ObjectMonitorHandle omh;
 460   inflate(&omh, THREAD, obj, inflate_cause_jni_exit);
 461   ObjectMonitor * monitor = omh.om_ptr();
 462   // If this thread has locked the object, exit the monitor.  Note:  can't use
 463   // monitor->check(CHECK); must exit even if an exception is pending.
 464   if (monitor->check(THREAD)) {
 465     monitor->exit(true, THREAD);
 466   }


1157 
1158     // 2: try to allocate from the global gFreeList
1159     // CONSIDER: use muxTry() instead of muxAcquire().
1160     // If the muxTry() fails then drop immediately into case 3.
1161     // If we're using thread-local free lists then try
1162     // to reprovision the caller's free list.
1163     if (gFreeList != NULL) {
1164       // Reprovision the thread's omFreeList.
1165       // Use bulk transfers to reduce the allocation rate and heat
1166       // on various locks.
1167       Thread::muxAcquire(&gListLock, "omAlloc(1)");
1168       for (int i = Self->omFreeProvision; --i >= 0 && gFreeList != NULL;) {
1169         gMonitorFreeCount--;
1170         ObjectMonitor * take = gFreeList;
1171         gFreeList = take->FreeNext;
1172         guarantee(take->object() == NULL, "invariant");
1173         if (AsyncDeflateIdleMonitors) {
1174           // Clear any values we allowed to linger during async deflation.
1175           take->_header = NULL;
1176           take->set_owner(NULL);
1177           take->_contentions = 0;
1178 
1179           if (take->ref_count() < 0) {
1180             // Add back max_jint to restore the ref_count field to its
1181             // proper value.
1182             Atomic::add(max_jint, &take->_ref_count);
1183 
1184             assert(take->ref_count() >= 0, "must not be negative: ref_count=%d",
1185                    take->ref_count());
1186           }
1187         }
1188         guarantee(!take->is_busy(), "invariant");
1189         take->Recycle();
1190         assert(take->is_free(), "invariant");
1191         omRelease(Self, take, false);
1192       }
1193       Thread::muxRelease(&gListLock);
1194       Self->omFreeProvision += 1 + (Self->omFreeProvision/2);
1195       if (Self->omFreeProvision > MAXPRIVATE) Self->omFreeProvision = MAXPRIVATE;
1196 
1197       const int mx = MonitorBound;
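The "add back max_jint" fix-up above is plain biased arithmetic: the deflater flips a zero ref_count to -max_jint, any increments that raced in are preserved as an offset, and adding max_jint back recovers the true count. (The omFreeProvision bump a few lines up grows the per-thread refill geometrically, p -> p + 1 + p/2, capped at MAXPRIVATE.) A worked example with std::atomic in place of HotSpot's Atomic:: wrappers:

    #include <atomic>
    #include <cassert>
    #include <climits>

    int main() {
      const int max_jint = INT_MAX;      // HotSpot's max_jint
      std::atomic<int> ref_count{0};

      // Deflater flips 0 -> -max_jint (the async-deflation signal).
      int expected = 0;
      ref_count.compare_exchange_strong(expected, -max_jint);

      // A racing user increments after the flip: value is -max_jint + 1.
      ref_count.fetch_add(1);

      // Adding max_jint back recovers the true count, here 1.
      ref_count.fetch_add(max_jint);
      assert(ref_count.load() == 1);
      return 0;
    }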


1765            p2i(mid->object()));
1766     assert(mid->is_free(), "invariant");
1767 
1768     // Move the object to the working free list defined by freeHeadp, freeTailp
1769     if (*freeHeadp == NULL) *freeHeadp = mid;
1770     if (*freeTailp != NULL) {
1771       ObjectMonitor * prevtail = *freeTailp;
1772       assert(prevtail->FreeNext == NULL, "cleaned up deflated?");
1773       prevtail->FreeNext = mid;
1774     }
1775     *freeTailp = mid;
1776     deflated = true;
1777   }
1778   return deflated;
1779 }
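The head/tail append above (and again in deflate_monitor_using_JT() below) is a standard tail-insert into a caller-owned singly linked batch; a simplified sketch with the ObjectMonitor type reduced to its FreeNext link:

    struct OM { OM* FreeNext = nullptr; };   // simplified ObjectMonitor

    static void append_to_working_list(OM* mid, OM** freeHeadp,
                                       OM** freeTailp) {
      if (*freeHeadp == nullptr) *freeHeadp = mid;        // first element
      if (*freeTailp != nullptr) (*freeTailp)->FreeNext = mid;
      *freeTailp = mid;                                   // mid is new tail
    }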
1780 
1781 // Deflate the specified ObjectMonitor if not in-use using a JavaThread.
1782 // Returns true if it was deflated and false otherwise.
1783 //
1784 // The async deflation protocol sets owner to DEFLATER_MARKER and
1785 // makes contentions negative as signals to contending threads that
1786 // an async deflation is in progress. There are a number of checks
1787 // as part of the protocol to make sure that the calling thread has
1788 // not lost the race to a contending thread or to a thread that just
1789 // wants to use the ObjectMonitor*.
1790 //
1791 // The ObjectMonitor has been successfully async deflated when:
1792 // (owner == DEFLATER_MARKER && contentions < 0 && ref_count < 0).
1793 // Contending threads or ObjectMonitor* using threads that see those
1794 // values know to retry their operation.
1795 //
1796 bool ObjectSynchronizer::deflate_monitor_using_JT(ObjectMonitor* mid,
1797                                                   ObjectMonitor** freeHeadp,
1798                                                   ObjectMonitor** freeTailp) {
1799   assert(AsyncDeflateIdleMonitors, "sanity check");
1800   assert(Thread::current()->is_Java_thread(), "precondition");
1801   // A newly allocated ObjectMonitor should not be seen here so we
1802   // avoid an endless inflate/deflate cycle.
1803   assert(mid->is_old(), "must be old: allocation_state=%d",
1804          (int) mid->allocation_state());
1805 
1806   if (mid->is_busy() || mid->ref_count() != 0) {
1807     // Easy checks are first - the ObjectMonitor is busy or the
1808     // ObjectMonitor* is in use, so no deflation.
1809     return false;
1810   }
1811 
1812   if (Atomic::replace_if_null(DEFLATER_MARKER, &(mid->_owner))) {
1813     // ObjectMonitor is not owned by another thread. Our setting
1814     // owner to DEFLATER_MARKER forces any contending thread through
1815     // the slow path. This is just the first part of the async
1816     // deflation dance.
1817 
1818     if (mid->_waiters != 0 || mid->ref_count() != 0) {
1819       // Another thread has raced to enter the ObjectMonitor after
1820       // mid->is_busy() above and has already waited on it, which
1821       // makes it busy, so no deflation. Or the ObjectMonitor* is
1822       // in use for some other operation like inflate(). Restore
1823       // owner to NULL if it is still DEFLATER_MARKER.
1824       Atomic::cmpxchg((void*)NULL, &mid->_owner, DEFLATER_MARKER);
1825       return false;
1826     }
1827 
1828     if (Atomic::cmpxchg(-max_jint, &mid->_contentions, (jint)0) == 0) {
1829       // Make contentions negative to force any contending threads to
1830       // retry. This is the second part of the async deflation dance.

1831 
1832       if (mid->_owner == DEFLATER_MARKER &&
1833           Atomic::cmpxchg(-max_jint, &mid->_ref_count, (jint)0) == 0) {
1834         // If owner is still DEFLATER_MARKER, then we have successfully
1835         // signaled any contending threads to retry. If it is not, then we
1836         // have lost the race to an entering thread and the ObjectMonitor
1837         // is now busy. If we cannot make ref_count negative (because the
1838         // ObjectMonitor* is in use), then we have lost that race instead.
1839         // This is the third and final part of the async deflation dance.
1840         // Note: This owner check solves the ABA problem with contentions
1841         // where another thread acquired the ObjectMonitor, finished
1842         // using it and restored the contentions to zero.
1843         // Note: Making ref_count negative solves the race with
1844         // ObjectMonitor::save_om_ptr() where its ref_count increment
1845         // happens after the first ref_count check in this function.
1846         // Note: Making ref_count negative must happen after the third
1847         // part check of "owner == DEFLATER_MARKER". When save_om_ptr()
1848         // retries, it will call install_displaced_markword_in_object()
1849         // which will disconnect the object from the ObjectMonitor so
1850         // deflation must happen.
1851 
1852         // Sanity checks for the races:


1853         guarantee(mid->_waiters == 0, "must be 0: waiters=%d", mid->_waiters);
1854         guarantee(mid->_cxq == NULL, "must be no contending threads: cxq="
1855                   INTPTR_FORMAT, p2i(mid->_cxq));
1856         guarantee(mid->_EntryList == NULL,
1857                   "must be no entering threads: EntryList=" INTPTR_FORMAT,
1858                   p2i(mid->_EntryList));
1859 
1860         const oop obj = (oop) mid->object();
1861         if (log_is_enabled(Trace, monitorinflation)) {
1862           ResourceMark rm;
1863           log_trace(monitorinflation)("deflate_monitor_using_JT: "
1864                                       "object=" INTPTR_FORMAT ", mark="
1865                                       INTPTR_FORMAT ", type='%s'",
1866                                       p2i(obj), p2i(obj->mark()),
1867                                       obj->klass()->external_name());
1868         }
1869 
1870         // Install the old mark word if nobody else has already done it.
1871         mid->install_displaced_markword_in_object(obj);
1872         mid->clear_using_JT();


1881         if (*freeHeadp == NULL) {
1882           // First one on the list.
1883           *freeHeadp = mid;
1884         }
1885         if (*freeTailp != NULL) {
1886           // We append to the list so the caller can use mid->FreeNext
1887           // to fix the linkages in its context.
1888           ObjectMonitor * prevtail = *freeTailp;
1889           // Should have been cleaned up by the caller:
1890           assert(prevtail->FreeNext == NULL, "must be NULL: FreeNext="
1891                  INTPTR_FORMAT, p2i(prevtail->FreeNext));
1892           prevtail->FreeNext = mid;
1893         }
1894         *freeTailp = mid;
1895 
1896         // At this point, mid->FreeNext still refers to its current
1897         // value and another ObjectMonitor's FreeNext field still
1898         // refers to this ObjectMonitor. Those linkages have to be
1899         // cleaned up by the caller who has the complete context.
1900 
1901         // We leave owner == DEFLATER_MARKER and contentions < 0
1902         // to force any racing threads to retry.
1903         return true;  // Success, ObjectMonitor has been deflated.
1904       }
1905 
1906       // The owner was changed from DEFLATER_MARKER or the ObjectMonitor*
1907       // is in use, so we lost the race: the ObjectMonitor is now
1908       // busy.
1909 
1910       // Restore owner to NULL if it is still DEFLATER_MARKER:
1911       Atomic::cmpxchg((void*)NULL, &mid->_owner, DEFLATER_MARKER);
1912 
1913       // Add back max_jint to restore the contentions field to its
1914       // proper value (which may not be what we saw above):
1915       Atomic::add(max_jint, &mid->_contentions);
1916 
1917       assert(mid->_contentions >= 0, "must not be negative: contentions=%d",
1918              mid->_contentions);

1919     }
1920 
1921     // The contentions field was no longer 0, so we lost the race: the
1922     // ObjectMonitor is now busy.
1923     assert(mid->_owner != DEFLATER_MARKER, "must not be DEFLATER_MARKER");

1924   }
1925 
1926   // The owner field is no longer NULL, so we lost the race: the
1927   // ObjectMonitor is now busy.
1928   return false;
1929 }
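The three-part dance can be modeled end to end with std::atomic. The following is an illustrative, simplified model of the protocol above (no waiters/cxq handling, no contender logic), not the VM code; ToyMonitor and try_deflate are hypothetical names:

    #include <atomic>
    #include <climits>

    static int marker;                       // any distinguished address
    static void* const DEFLATER_MARKER = &marker;
    static const int max_jint = INT_MAX;

    struct ToyMonitor {
      std::atomic<void*> owner{nullptr};
      std::atomic<int>   contentions{0};
      std::atomic<int>   ref_count{0};
      std::atomic<int>   waiters{0};
    };

    static bool try_deflate(ToyMonitor& m) {
      // Part 1: claim an unowned monitor with the marker.
      void* exp = nullptr;
      if (!m.owner.compare_exchange_strong(exp, DEFLATER_MARKER)) {
        return false;                        // already owned: busy
      }
      if (m.waiters.load() != 0 || m.ref_count.load() != 0) {
        exp = DEFLATER_MARKER;               // raced: undo part 1
        m.owner.compare_exchange_strong(exp, nullptr);
        return false;
      }
      // Part 2: flip contentions 0 -> negative so contenders retry.
      int zero = 0;
      if (!m.contentions.compare_exchange_strong(zero, -max_jint)) {
        // In the VM a contender that bumped contentions also takes over
        // the owner field (asserted above); the toy just undoes part 1.
        exp = DEFLATER_MARKER;
        m.owner.compare_exchange_strong(exp, nullptr);
        return false;
      }
      // Part 3: owner must still be the marker, and ref_count must flip.
      int zero2 = 0;
      if (m.owner.load() == DEFLATER_MARKER &&
          m.ref_count.compare_exchange_strong(zero2, -max_jint)) {
        return true;  // deflated: owner marker, contentions/ref_count < 0
      }
      exp = DEFLATER_MARKER;                 // lost part 3: undo parts 1+2
      m.owner.compare_exchange_strong(exp, nullptr);
      m.contentions.fetch_add(max_jint);
      return false;
    }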
1930 
1931 // Walk a given monitor list, and deflate idle monitors.
1932 // The given list could be a per-thread list or a global list.
1933 // Caller acquires gListLock as needed.
1934 //
1935 // In the case of parallel processing of thread-local monitor lists,
1936 // work is done by Threads::parallel_java_threads_do() which ensures
1937 // that each Java thread is processed by exactly one worker thread, and
1938 // thus avoids conflicts that would arise if worker threads processed
1939 // the same monitor lists concurrently.
1940 //
1941 // See also ParallelSPCleanupTask and
1942 // SafepointSynchronize::do_cleanup_tasks() in safepoint.cpp and
1943 // Threads::parallel_java_threads_do() in thread.cpp.




 333     // If the object is stack-locked by the current thread, try to
 334     // swing the displaced header from the BasicLock back to the mark.
 335     assert(dhw->is_neutral(), "invariant");
 336     if (object->cas_set_mark(dhw, mark) == mark) {
 337       return;
 338     }
 339   }
 340 
 341   // We have to take the slow-path of possible inflation and then exit.
 342   ObjectMonitorHandle omh;
 343   inflate(&omh, THREAD, object, inflate_cause_vm_internal);
 344   omh.om_ptr()->exit(true, THREAD);
 345 }
 346 
 347 // -----------------------------------------------------------------------------
 348 // Interpreter/Compiler Slow Case
 349 // This routine is used to handle the interpreter/compiler slow case.
 350 // We don't need to use the fast path here, because it must have
 351 // already failed in the interpreter/compiler code.
 352 void ObjectSynchronizer::slow_enter(Handle obj, BasicLock* lock, TRAPS) {


 353   markOop mark = obj->mark();
 354   assert(!mark->has_bias_pattern(), "should not see bias pattern here");
 355 
 356   if (mark->is_neutral()) {
 357     // Anticipate successful CAS -- the ST of the displaced mark must
 358     // be visible <= the ST performed by the CAS.
 359     lock->set_displaced_header(mark);
 360     if (mark == obj()->cas_set_mark((markOop) lock, mark)) {
 361       return;
 362     }
 363     // Fall through to inflate() ...
 364   } else if (mark->has_locker() &&
 365              THREAD->is_lock_owned((address)mark->locker())) {
 366     assert(lock != mark->locker(), "must not re-lock the same lock");
 367     assert(lock != (BasicLock*)obj->mark(), "don't relock with same BasicLock");
 368     lock->set_displaced_header(NULL);
 369     return;
 370   }
 371 
 372   // The object header will never be displaced to this lock,
 373   // so it does not matter what the value is, except that it
 374   // must be non-zero to avoid looking like a re-entrant lock,
 375   // and must not look locked either.
 376   lock->set_displaced_header(markOopDesc::unused_mark());
 377   ObjectMonitorHandle omh;
 378   inflate(&omh, THREAD, obj(), inflate_cause_monitor_enter);
 379   omh.om_ptr()->enter(THREAD);

 380 }
 381 
 382 // This routine is used to handle the interpreter/compiler slow case.
 383 // We don't need to use the fast path here, because it must have
 384 // failed in the interpreter/compiler code. Simply using the heavy
 385 // weight monitor should be ok, unless someone finds otherwise.
 386 void ObjectSynchronizer::slow_exit(oop object, BasicLock* lock, TRAPS) {
 387   fast_exit(object, lock, THREAD);
 388 }
 389 
 390 // -----------------------------------------------------------------------------
 391 // Class Loader support to work around deadlocks on the class loader lock objects.
 392 // Also used by GC.
 393 // complete_exit()/reenter() are used to wait on a nested lock,
 394 // i.e. to give up an outer lock completely and then re-enter.
 395 // Used when holding nested locks - lock acquisition order: lock1 then lock2
 396 //  1) complete_exit lock1 - saving recursion count
 397 //  2) wait on lock2
 398 //  3) when notified on lock2, unlock lock2
 399 //  4) reenter lock1 with original recursion count


 401 // NOTE: must use heavy weight monitor to handle complete_exit/reenter()
 402 intptr_t ObjectSynchronizer::complete_exit(Handle obj, TRAPS) {
 403   if (UseBiasedLocking) {
 404     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
 405     assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
 406   }
 407 
 408   ObjectMonitorHandle omh;
 409   inflate(&omh, THREAD, obj(), inflate_cause_vm_internal);
 410   intptr_t ret_code = omh.om_ptr()->complete_exit(THREAD);
 411   return ret_code;
 412 }
 413 
 414 // NOTE: must use heavy weight monitor to handle complete_exit/reenter()
 415 void ObjectSynchronizer::reenter(Handle obj, intptr_t recursion, TRAPS) {
 416   if (UseBiasedLocking) {
 417     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
 418     assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
 419   }
 420 


 421   ObjectMonitorHandle omh;
 422   inflate(&omh, THREAD, obj(), inflate_cause_vm_internal);
 423   omh.om_ptr()->reenter(recursion, THREAD);

 424 }
 425 // -----------------------------------------------------------------------------
 426 // JNI locks on java objects
 427 // NOTE: must use heavy weight monitor to handle jni monitor enter
 428 void ObjectSynchronizer::jni_enter(Handle obj, TRAPS) {
 429   // the current locking is from JNI instead of Java code
 430   if (UseBiasedLocking) {
 431     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
 432     assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
 433   }
 434   THREAD->set_current_pending_monitor_is_from_java(false);


 435   ObjectMonitorHandle omh;
 436   inflate(&omh, THREAD, obj(), inflate_cause_jni_enter);
 437   omh.om_ptr()->enter(THREAD);

 438   THREAD->set_current_pending_monitor_is_from_java(true);
 439 }
 440 
 441 // NOTE: must use heavy weight monitor to handle jni monitor exit
 442 void ObjectSynchronizer::jni_exit(oop obj, Thread* THREAD) {
 443   if (UseBiasedLocking) {
 444     Handle h_obj(THREAD, obj);
 445     BiasedLocking::revoke_and_rebias(h_obj, false, THREAD);
 446     obj = h_obj();
 447   }
 448   assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
 449 
 450   ObjectMonitorHandle omh;
 451   inflate(&omh, THREAD, obj, inflate_cause_jni_exit);
 452   ObjectMonitor * monitor = omh.om_ptr();
 453   // If this thread has locked the object, exit the monitor.  Note:  can't use
 454   // monitor->check(CHECK); must exit even if an exception is pending.
 455   if (monitor->check(THREAD)) {
 456     monitor->exit(true, THREAD);
 457   }


1148 
1149     // 2: try to allocate from the global gFreeList
1150     // CONSIDER: use muxTry() instead of muxAcquire().
1151     // If the muxTry() fails then drop immediately into case 3.
1152     // If we're using thread-local free lists then try
1153     // to reprovision the caller's free list.
1154     if (gFreeList != NULL) {
1155       // Reprovision the thread's omFreeList.
1156       // Use bulk transfers to reduce the allocation rate and heat
1157       // on various locks.
1158       Thread::muxAcquire(&gListLock, "omAlloc(1)");
1159       for (int i = Self->omFreeProvision; --i >= 0 && gFreeList != NULL;) {
1160         gMonitorFreeCount--;
1161         ObjectMonitor * take = gFreeList;
1162         gFreeList = take->FreeNext;
1163         guarantee(take->object() == NULL, "invariant");
1164         if (AsyncDeflateIdleMonitors) {
1165           // Clear any values we allowed to linger during async deflation.
1166           take->_header = NULL;
1167           take->set_owner(NULL);

1168 
1169           if (take->ref_count() < 0) {
1170             // Add back max_jint to restore the ref_count field to its
1171             // proper value.
1172             Atomic::add(max_jint, &take->_ref_count);
1173 
1174             assert(take->ref_count() >= 0, "must not be negative: ref_count=%d",
1175                    take->ref_count());
1176           }
1177         }
1178         guarantee(!take->is_busy(), "invariant");
1179         take->Recycle();
1180         assert(take->is_free(), "invariant");
1181         omRelease(Self, take, false);
1182       }
1183       Thread::muxRelease(&gListLock);
1184       Self->omFreeProvision += 1 + (Self->omFreeProvision/2);
1185       if (Self->omFreeProvision > MAXPRIVATE) Self->omFreeProvision = MAXPRIVATE;
1186 
1187       const int mx = MonitorBound;


1755            p2i(mid->object()));
1756     assert(mid->is_free(), "invariant");
1757 
1758     // Move the object to the working free list defined by freeHeadp, freeTailp
1759     if (*freeHeadp == NULL) *freeHeadp = mid;
1760     if (*freeTailp != NULL) {
1761       ObjectMonitor * prevtail = *freeTailp;
1762       assert(prevtail->FreeNext == NULL, "cleaned up deflated?");
1763       prevtail->FreeNext = mid;
1764     }
1765     *freeTailp = mid;
1766     deflated = true;
1767   }
1768   return deflated;
1769 }
1770 
1771 // Deflate the specified ObjectMonitor if not in-use using a JavaThread.
1772 // Returns true if it was deflated and false otherwise.
1773 //
1774 // The async deflation protocol sets owner to DEFLATER_MARKER and
1775 // makes ref_count negative as signals to contending threads that
1776 // an async deflation is in progress. There are a number of checks
1777 // as part of the protocol to make sure that the calling thread has
1778 // not lost the race to a contending thread or to a thread that just
1779 // wants to use the ObjectMonitor*.
1780 //
1781 // The ObjectMonitor has been successfully async deflated when:
1782 // (owner == DEFLATER_MARKER && ref_count < 0)
1783 // Contending threads or ObjectMonitor* using threads that see those
1784 // values know to retry their operation.
1785 //
1786 bool ObjectSynchronizer::deflate_monitor_using_JT(ObjectMonitor* mid,
1787                                                   ObjectMonitor** freeHeadp,
1788                                                   ObjectMonitor** freeTailp) {
1789   assert(AsyncDeflateIdleMonitors, "sanity check");
1790   assert(Thread::current()->is_Java_thread(), "precondition");
1791   // A newly allocated ObjectMonitor should not be seen here so we
1792   // avoid an endless inflate/deflate cycle.
1793   assert(mid->is_old(), "must be old: allocation_state=%d",
1794          (int) mid->allocation_state());
1795 
1796   if (mid->is_busy() || mid->ref_count() != 0) {
1797     // Easy checks are first - the ObjectMonitor is busy or the
1798     // ObjectMonitor* is in use, so no deflation.
1799     return false;
1800   }
1801 
1802   if (Atomic::replace_if_null(DEFLATER_MARKER, &(mid->_owner))) {
1803     // ObjectMonitor is not owned by another thread. Our setting
1804     // owner to DEFLATER_MARKER forces any contending thread through
1805     // the slow path. This is just the first part of the async
1806     // deflation dance.
1807 
1808     if (mid->_contentions != 0 || mid->_waiters != 0) {
1809       // Another thread has raced to enter the ObjectMonitor after
1810       // mid->is_busy() above or has already entered and waited on
1811       // it, which makes it busy, so no deflation. Restore owner to
1812       // NULL if it is still DEFLATER_MARKER.

1813       Atomic::cmpxchg((void*)NULL, &mid->_owner, DEFLATER_MARKER);
1814       return false;
1815     }
1816 
1817     if (Atomic::cmpxchg(-max_jint, &mid->_ref_count, (jint)0) == 0) {
1818       // Make ref_count negative to force any contending threads or
1819       // ObjectMonitor* using threads to retry. This is the second
1820       // part of the async deflation dance.
1821 
1822       if (mid->_owner == DEFLATER_MARKER) {

1823         // If owner is still DEFLATER_MARKER, then we have successfully
1824         // signaled any contending threads to retry. If it is not, then we
1825         // have lost the race to an entering thread and the ObjectMonitor
1826         // is now busy. This is the third and final part of the async
1827         // deflation dance.
1828         // Note: This owner check solves the ABA problem with ref_count

1829         // where another thread acquired the ObjectMonitor, finished
1830         // using it and restored the ref_count to zero.








1831 
1832         // Sanity checks for the races:
1833         guarantee(mid->_contentions == 0, "must be 0: contentions=%d",
1834                   mid->_contentions);
1835         guarantee(mid->_waiters == 0, "must be 0: waiters=%d", mid->_waiters);
1836         guarantee(mid->_cxq == NULL, "must be no contending threads: cxq="
1837                   INTPTR_FORMAT, p2i(mid->_cxq));
1838         guarantee(mid->_EntryList == NULL,
1839                   "must be no entering threads: EntryList=" INTPTR_FORMAT,
1840                   p2i(mid->_EntryList));
1841 
1842         const oop obj = (oop) mid->object();
1843         if (log_is_enabled(Trace, monitorinflation)) {
1844           ResourceMark rm;
1845           log_trace(monitorinflation)("deflate_monitor_using_JT: "
1846                                       "object=" INTPTR_FORMAT ", mark="
1847                                       INTPTR_FORMAT ", type='%s'",
1848                                       p2i(obj), p2i(obj->mark()),
1849                                       obj->klass()->external_name());
1850         }
1851 
1852         // Install the old mark word if nobody else has already done it.
1853         mid->install_displaced_markword_in_object(obj);
1854         mid->clear_using_JT();


1863         if (*freeHeadp == NULL) {
1864           // First one on the list.
1865           *freeHeadp = mid;
1866         }
1867         if (*freeTailp != NULL) {
1868           // We append to the list so the caller can use mid->FreeNext
1869           // to fix the linkages in its context.
1870           ObjectMonitor * prevtail = *freeTailp;
1871           // Should have been cleaned up by the caller:
1872           assert(prevtail->FreeNext == NULL, "must be NULL: FreeNext="
1873                  INTPTR_FORMAT, p2i(prevtail->FreeNext));
1874           prevtail->FreeNext = mid;
1875         }
1876         *freeTailp = mid;
1877 
1878         // At this point, mid->FreeNext still refers to its current
1879         // value and another ObjectMonitor's FreeNext field still
1880         // refers to this ObjectMonitor. Those linkages have to be
1881         // cleaned up by the caller who has the complete context.
1882 
1883         // We leave owner == DEFLATER_MARKER and ref_count < 0
1884         // to force any racing threads to retry.
1885         return true;  // Success, ObjectMonitor has been deflated.
1886       }
1887 
1888       // The owner was changed from DEFLATER_MARKER, so we lost the
1889       // race: the ObjectMonitor is now busy.

1890 
1891       // Add back max_jint to restore the ref_count field to its



1892       // proper value (which may not be what we saw above):
1893       Atomic::add(max_jint, &mid->_ref_count);
1894 
1895       assert(mid->ref_count() >= 0, "must not be negative: ref_count=%d",
1896              mid->ref_count());
1897       return false;
1898     }
1899 
1900     // The ref_count field was no longer 0, so we lost the race: the
1901     // ObjectMonitor is now busy or the ObjectMonitor* is now in use.
1902     // Restore owner to NULL if it is still DEFLATER_MARKER:
1903     Atomic::cmpxchg((void*)NULL, &mid->_owner, DEFLATER_MARKER);
1904   }
1905 
1906   // The owner field is no longer NULL, so we lost the race: the
1907   // ObjectMonitor is now busy.
1908   return false;
1909 }
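For the other side of this race, a thread that wants to keep using the ObjectMonitor* (as the comments above describe for ObjectMonitor::save_om_ptr()) increments ref_count and then re-checks the deflation signal. A hedged sketch of that shape, not the actual save_om_ptr() code:

    #include <atomic>

    static bool try_use_monitor(std::atomic<int>& ref_count) {
      ref_count.fetch_add(1);          // optimistic increment
      if (ref_count.load() < 0) {      // deflater won: signal is negative
        ref_count.fetch_sub(1);        // back out; caller must re-inflate
        return false;
      }
      return true;                     // safe to use; decrement when done
    }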
1910 
1911 // Walk a given monitor list, and deflate idle monitors
1912 // The given list could be a per-thread list or a global list
1913 // Caller acquires gListLock as needed.
1914 //
1915 // In the case of parallel processing of thread local monitor lists,
1916 // work is done by Threads::parallel_threads_do() which ensures that
1917 // each Java thread is processed by exactly one worker thread, and
1918 // thus avoid conflicts that would arise when worker threads would
1919 // process the same monitor lists concurrently.
1920 //
1921 // See also ParallelSPCleanupTask and
1922 // SafepointSynchronize::do_cleanup_tasks() in safepoint.cpp and
1923 // Threads::parallel_java_threads_do() in thread.cpp.

