731 Handle hobj(Self, obj);
732 // Relaxing assertion for bug 6320749.
733 assert(Universe::verify_in_progress() ||
734 !SafepointSynchronize::is_at_safepoint(),
735 "biases should not be seen by VM thread here");
736 BiasedLocking::revoke_and_rebias(hobj, false, JavaThread::current());
737 obj = hobj();
738 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
739 }
740 }
741
742 // hashCode() is a heap mutator ...
743 // Relaxing assertion for bug 6320749.
744 assert(Universe::verify_in_progress() || DumpSharedSpaces ||
745 !SafepointSynchronize::is_at_safepoint(), "invariant");
746 assert(Universe::verify_in_progress() || DumpSharedSpaces ||
747 Self->is_Java_thread() , "invariant");
748 assert(Universe::verify_in_progress() || DumpSharedSpaces ||
749 ((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant");
750
751 Retry:
752 ObjectMonitor* monitor = NULL;
753 markOop temp, test;
754 intptr_t hash;
755 markOop mark = ReadStableMark(obj);
756
757 // object should remain ineligible for biased locking
758 assert(!mark->has_bias_pattern(), "invariant");
759
760 if (mark->is_neutral()) {
761 hash = mark->hash(); // this is a normal header
762 if (hash != 0) { // if it has hash, just return it
763 return hash;
764 }
765 hash = get_next_hash(Self, obj); // allocate a new hash code
766 temp = mark->copy_set_hash(hash); // merge the hash code into header
767 // use (machine word version) atomic operation to install the hash
768 test = obj->cas_set_mark(temp, mark);
769 if (test == mark) {
770 return hash;
771 }
772 // If atomic operation failed, we must inflate the header
773 // into heavy weight monitor. We could add more code here
// for a fast path, but it is not worth the complexity.
775 } else if (mark->has_monitor()) {
776 ObjectMonitorHandle omh;
777 if (!omh.save_om_ptr(obj, mark)) {
778 // Lost a race with async deflation so try again.
779 assert(AsyncDeflateIdleMonitors, "sanity check");
780 goto Retry;
781 }
782 monitor = omh.om_ptr();
783 temp = monitor->header();
784 assert(temp->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i(temp));
785 hash = temp->hash();
786 if (hash != 0) {
787 return hash;
788 }
789 // Skip to the following code to reduce code size
790 } else if (Self->is_lock_owned((address)mark->locker())) {
791 temp = mark->displaced_mark_helper(); // this is a lightweight monitor owned
792 assert(temp->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i(temp));
793 hash = temp->hash(); // by current thread, check if the displaced
794 if (hash != 0) { // header contains hash code
795 return hash;
796 }
797 // WARNING:
798 // The displaced header in the BasicLock on a thread's stack
799 // is strictly immutable. It CANNOT be changed in ANY cases.
800 // So we have to inflate the stack lock into an ObjectMonitor
801 // even if the current thread owns the lock. The BasicLock on
802 // a thread's stack can be asynchronously read by other threads
803 // during an inflate() call so any change to that stack memory
804 // may not propagate to other threads correctly.
805 }
806
807 // Inflate the monitor to set hash code
808 ObjectMonitorHandle omh;
809 inflate(&omh, Self, obj, inflate_cause_hash_code);
810 monitor = omh.om_ptr();
811 // Load displaced header and check it has hash code
812 mark = monitor->header();
813 assert(mark->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i(mark));
814 hash = mark->hash();
815 if (hash == 0) {
816 hash = get_next_hash(Self, obj);
817 temp = mark->copy_set_hash(hash); // merge hash code into header
818 assert(temp->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i(temp));
819 test = Atomic::cmpxchg(temp, monitor->header_addr(), mark);
820 if (test != mark) {
821 // The only update to the ObjectMonitor's header/dmw field
822 // is to merge in the hash code. If someone adds a new usage
823 // of the header/dmw field, please update this code.
824 hash = test->hash();
825 assert(test->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i(test));
826 assert(hash != 0, "Trivial unexpected object/monitor header usage.");
827 }
828 }
829 // We finally get the hash
830 return hash;
831 }
832
833 // Deprecated -- use FastHashCode() instead.
834
835 intptr_t ObjectSynchronizer::identity_hash_value_for(Handle obj) {
836 return FastHashCode(Thread::current(), obj());
837 }
838
839
840 bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* thread,
841 Handle h_obj) {
842 if (UseBiasedLocking) {
843 BiasedLocking::revoke_and_rebias(h_obj, false, thread);
844 assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now");
845 }
846
847 assert(thread == JavaThread::current(), "Can only be called on current thread");
848 oop obj = h_obj();
849
850 while (true) {
1148 Self->omInUseCount++;
1149 return m;
1150 }
1151
1152 // 2: try to allocate from the global gFreeList
1153 // CONSIDER: use muxTry() instead of muxAcquire().
1154 // If the muxTry() fails then drop immediately into case 3.
1155 // If we're using thread-local free lists then try
1156 // to reprovision the caller's free list.
1157 if (gFreeList != NULL) {
1158 // Reprovision the thread's omFreeList.
1159 // Use bulk transfers to reduce the allocation rate and heat
1160 // on various locks.
1161 Thread::muxAcquire(&gListLock, "omAlloc(1)");
1162 for (int i = Self->omFreeProvision; --i >= 0 && gFreeList != NULL;) {
1163 gMonitorFreeCount--;
1164 ObjectMonitor * take = gFreeList;
1165 gFreeList = take->FreeNext;
1166 guarantee(take->object() == NULL, "invariant");
1167 if (AsyncDeflateIdleMonitors) {
1168 take->set_owner(NULL);
1169 take->_contentions = 0;
1170 }
1171 guarantee(!take->is_busy(), "invariant");
1172 take->Recycle();
1173 assert(take->is_free(), "invariant");
1174 omRelease(Self, take, false);
1175 }
1176 Thread::muxRelease(&gListLock);
1177 Self->omFreeProvision += 1 + (Self->omFreeProvision/2);
1178 if (Self->omFreeProvision > MAXPRIVATE) Self->omFreeProvision = MAXPRIVATE;
1179
1180 const int mx = MonitorBound;
1181 if (mx > 0 && (gMonitorPopulation-gMonitorFreeCount) > mx) {
1182 // We can't safely induce a STW safepoint from omAlloc() as our thread
1183 // state may not be appropriate for such activities and callers may hold
1184 // naked oops, so instead we defer the action.
1185 InduceScavenge(Self, "omAlloc");
1186 }
1187 continue;
1327 // run at the same time as omFlush() so we have to be careful.
1328
1329 void ObjectSynchronizer::omFlush(Thread * Self) {
1330 ObjectMonitor * list = Self->omFreeList; // Null-terminated SLL
1331 ObjectMonitor * tail = NULL;
1332 int tally = 0;
1333 if (list != NULL) {
1334 ObjectMonitor * s;
1335 // The thread is going away, the per-thread free monitors
1336 // are freed via set_owner(NULL)
1337 // Link them to tail, which will be linked into the global free list
1338 // gFreeList below, under the gListLock
1339 for (s = list; s != NULL; s = s->FreeNext) {
1340 tally++;
1341 tail = s;
1342 guarantee(s->object() == NULL, "invariant");
1343 guarantee(!s->is_busy(), "invariant");
1344 s->set_owner(NULL); // redundant but good hygiene
1345 }
1346 guarantee(tail != NULL, "invariant");
1347 guarantee(Self->omFreeCount == tally, "free-count off");
1348 Self->omFreeList = NULL;
1349 Self->omFreeCount = 0;
1350 }
1351
1352 ObjectMonitor * inUseList = Self->omInUseList;
1353 ObjectMonitor * inUseTail = NULL;
1354 int inUseTally = 0;
1355 if (inUseList != NULL) {
1356 ObjectMonitor *cur_om;
1357 // The thread is going away, however the omInUseList inflated
1358 // monitors may still be in-use by other threads.
1359 // Link them to inUseTail, which will be linked into the global in-use list
1360 // gOmInUseList below, under the gListLock
1361 for (cur_om = inUseList; cur_om != NULL; cur_om = cur_om->FreeNext) {
1362 inUseTail = cur_om;
1363 inUseTally++;
1364 guarantee(cur_om->is_active(), "invariant");
1365 }
1366 guarantee(inUseTail != NULL, "invariant");
1367 guarantee(Self->omInUseCount == inUseTally, "in-use count off");
1368 Self->omInUseList = NULL;
1369 Self->omInUseCount = 0;
1370 }
1371
1372 Thread::muxAcquire(&gListLock, "omFlush");
1373 if (tail != NULL) {
1374 tail->FreeNext = gFreeList;
1375 gFreeList = list;
1376 gMonitorFreeCount += tally;
1377 }
1378
1379 if (inUseTail != NULL) {
1380 inUseTail->FreeNext = gOmInUseList;
1381 gOmInUseList = inUseList;
1382 gOmInUseCount += inUseTally;
1383 }
1384
1385 Thread::muxRelease(&gListLock);
1386
1387 LogStreamHandle(Debug, monitorinflation) lsh_debug;
1542 // value from the basiclock on the owner's stack to the objectMonitor, all
1543 // the while preserving the hashCode stability invariants. If the owner
1544 // decides to release the lock while the value is 0, the unlock will fail
1545 // and control will eventually pass from slow_exit() to inflate. The owner
1546 // will then spin, waiting for the 0 value to disappear. Put another way,
1547 // the 0 causes the owner to stall if the owner happens to try to
1548 // drop the lock (restoring the header from the basiclock to the object)
// while inflation is in-progress. This protocol avoids races that might
// otherwise permit hashCode values to change or "flicker" for an object.
1551 // Critically, while object->mark is 0 mark->displaced_mark_helper() is stable.
1552 // 0 serves as a "BUSY" inflate-in-progress indicator.
1553
1554
1555 // fetch the displaced mark from the owner's stack.
1556 // The owner can't die or unwind past the lock while our INFLATING
1557 // object is in the mark. Furthermore the owner can't complete
1558 // an unlock on the object, either.
1559 markOop dmw = mark->displaced_mark_helper();
1560 // Catch if the object's header is not neutral (not locked and
1561 // not marked is what we care about here).
1562 assert(dmw->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i(dmw));
1563
1564 // Setup monitor fields to proper values -- prepare the monitor
1565 m->set_header(dmw);
1566
1567 // Optimization: if the mark->locker stack address is associated
1568 // with this thread we could simply set m->_owner = Self.
1569 // Note that a thread can inflate an object
1570 // that it has stack-locked -- as might happen in wait() -- directly
1571 // with CAS. That is, we can avoid the xchg-NULL .... ST idiom.
1572 m->set_owner(mark->locker());
1573 m->set_object(object);
1574 // TODO-FIXME: assert BasicLock->dhw != 0.
1575
1576 // Must preserve store ordering. The monitor state must
1577 // be stable at the time of publishing the monitor address.
1578 guarantee(object->mark() == markOopDesc::INFLATING(), "invariant");
1579 object->release_set_mark(markOopDesc::encode(m));
1580
1581 // Hopefully the performance counters are allocated on distinct cache lines
1582 // to avoid false sharing on MP systems ...
1583 OM_PERFDATA_OP(Inflations, inc());
1584 if (log_is_enabled(Trace, monitorinflation)) {
1585 ResourceMark rm(Self);
1586 lsh.print_cr("inflate(has_locker): object=" INTPTR_FORMAT ", mark="
1587 INTPTR_FORMAT ", type='%s'", p2i(object),
1588 p2i(object->mark()), object->klass()->external_name());
1589 }
1590 if (event.should_commit()) {
1591 post_monitor_inflate_event(&event, object, cause);
1592 }
1593 assert(!m->is_free(), "post-condition");
1594 omh_p->set_om_ptr(m);
1595 return;
1596 }
1597
1598 // CASE: neutral
1599 // TODO-FIXME: for entry we currently inflate and then try to CAS _owner.
1600 // If we know we're inflating for entry it's better to inflate by swinging a
1601 // pre-locked objectMonitor pointer into the object header. A successful
1602 // CAS inflates the object *and* confers ownership to the inflating thread.
1603 // In the current implementation we use a 2-step mechanism where we CAS()
1604 // to inflate and then CAS() again to try to swing _owner from NULL to Self.
1605 // An inflateTry() method that we could call from fast_enter() and slow_enter()
1606 // would be useful.
1607
1608 // Catch if the object's header is not neutral (not locked and
1609 // not marked is what we care about here).
1610 assert(mark->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i(mark));
1611 ObjectMonitor * m;
1612 if (!AsyncDeflateIdleMonitors || cause == inflate_cause_vm_internal) {
1613 // If !AsyncDeflateIdleMonitors or if an internal inflation, then
1614 // we won't stop for a potential safepoint in omAlloc.
1615 m = omAlloc(Self, cause);
1616 } else {
1617 // If AsyncDeflateIdleMonitors and not an internal inflation, then
1618 // we may stop for a safepoint in omAlloc() so protect object.
1619 Handle h_obj(Self, object);
1620 m = omAlloc(Self, cause);
1621 object = h_obj(); // Refresh object.
1622 }
1623 // prepare m for installation - set monitor to initial state
1624 m->Recycle();
1625 m->set_header(mark);
1626 m->set_owner(NULL);
1627 m->set_object(object);
1628 m->_recursions = 0;
1629 m->_Responsible = NULL;
1630 m->_SpinDuration = ObjectMonitor::Knob_SpinLimit; // consider: keep metastats by type/class
1631
1632 if (object->cas_set_mark(markOopDesc::encode(m), mark) != mark) {
1633 m->set_header(NULL);
1634 m->set_object(NULL);
1635 m->Recycle();
1636 omRelease(Self, m, true);
1637 m = NULL;
1638 continue;
1639 // interference - the markword changed - just retry.
1640 // The state-transitions are one-way, so there's no chance of
1641 // live-lock -- "Inflated" is an absorbing state.
1642 }
1643
1644 // Hopefully the performance counters are allocated on distinct
1645 // cache lines to avoid false sharing on MP systems ...
1646 OM_PERFDATA_OP(Inflations, inc());
1647 if (log_is_enabled(Trace, monitorinflation)) {
1648 ResourceMark rm(Self);
1649 lsh.print_cr("inflate(neutral): object=" INTPTR_FORMAT ", mark="
1650 INTPTR_FORMAT ", type='%s'", p2i(object),
1651 p2i(object->mark()), object->klass()->external_name());
1652 }
1653 if (event.should_commit()) {
1654 post_monitor_inflate_event(&event, object, cause);
1655 }
1656 omh_p->set_om_ptr(m);
1657 return;
1658 }
1659 }
1660
1661
1662 // We maintain a list of in-use monitors for each thread.
1663 //
1664 // deflate_thread_local_monitors() scans a single thread's in-use list, while
1665 // deflate_idle_monitors() scans only a global list of in-use monitors which
1666 // is populated only as a thread dies (see omFlush()).
1667 //
1668 // These operations are called at all safepoints, immediately after mutators
1669 // are stopped, but before any objects have moved. Collectively they traverse
1670 // the population of in-use monitors, deflating where possible. The scavenged
1671 // monitors are returned to the global monitor free list.
1672 //
1673 // Beware that we scavenge at *every* stop-the-world point. Having a large
1674 // number of monitors in-use could negatively impact performance. We also want
1675 // to minimize the total # of monitors in circulation, as they incur a small
1676 // footprint penalty.
1677 //
1678 // Perversely, the heap size -- and thus the STW safepoint rate --
1679 // typically drives the scavenge rate. Large heaps can mean infrequent GC,
1680 // which in turn can mean large(r) numbers of ObjectMonitors in circulation.
1681 // This is an unfortunate aspect of this design.
1682
// Safepoint cleanup hook for idle monitor deflation. With the legacy
// (synchronous) mechanism, the global in-use list is deflated right here.
// With AsyncDeflateIdleMonitors, this function only posts deflation
// requests: one global request for the ServiceThread and one per-thread
// request for each JavaThread that has in-use monitors.
void ObjectSynchronizer::do_safepoint_work(DeflateMonitorCounters* _counters) {
  if (!AsyncDeflateIdleMonitors) {
    // Use the older mechanism for the global in-use list.
    ObjectSynchronizer::deflate_idle_monitors(_counters);
    return;
  }

  // Counters are only gathered by the synchronous mechanism above.
  assert(_counters == NULL, "not used with AsyncDeflateIdleMonitors");

  log_debug(monitorinflation)("requesting deflation of idle monitors.");
  // Request deflation of global idle monitors by the ServiceThread:
  _gOmShouldDeflateIdleMonitors = true;
  // Wake the ServiceThread so it notices the request. Service_lock is
  // taken with _no_safepoint_check_flag since this runs at a safepoint
  // (see the commentary above this function).
  MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag);
  Service_lock->notify_all();

  // Request deflation of per-thread idle monitors by each JavaThread:
  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
    if (jt->omInUseCount > 0) {
      // This JavaThread is using monitors so check it.
      jt->omShouldDeflateIdleMonitors = true;
    }
  }
}
1706
1707 // Deflate a single monitor if not in-use
1708 // Return true if deflated, false if in-use
1709 bool ObjectSynchronizer::deflate_monitor(ObjectMonitor* mid, oop obj,
1710 ObjectMonitor** freeHeadp,
1711 ObjectMonitor** freeTailp) {
1712 bool deflated;
1713 // Normal case ... The monitor is associated with obj.
1714 const markOop mark = obj->mark();
1715 guarantee(mark == markOopDesc::encode(mid), "should match: mark="
1716 INTPTR_FORMAT ", encoded mid=" INTPTR_FORMAT, p2i(mark),
1717 p2i(markOopDesc::encode(mid)));
1718 // Make sure that mark->monitor() and markOopDesc::encode() agree:
1719 guarantee(mark->monitor() == mid, "should match: monitor()=" INTPTR_FORMAT
1720 ", mid=" INTPTR_FORMAT, p2i(mark->monitor()), p2i(mid));
1721 const markOop dmw = mid->header();
1722 guarantee(dmw->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i(dmw));
1723
1724 if (mid->is_busy()) {
1742 assert(mid->object() == NULL, "invariant: object=" INTPTR_FORMAT,
1743 p2i(mid->object()));
1744 assert(mid->is_free(), "invariant");
1745
1746 // Move the object to the working free list defined by freeHeadp, freeTailp
1747 if (*freeHeadp == NULL) *freeHeadp = mid;
1748 if (*freeTailp != NULL) {
1749 ObjectMonitor * prevtail = *freeTailp;
1750 assert(prevtail->FreeNext == NULL, "cleaned up deflated?");
1751 prevtail->FreeNext = mid;
1752 }
1753 *freeTailp = mid;
1754 deflated = true;
1755 }
1756 return deflated;
1757 }
1758
1759 // Deflate the specified ObjectMonitor if not in-use using a JavaThread.
1760 // Returns true if it was deflated and false otherwise.
1761 //
1762 // The async deflation protocol sets _owner to DEFLATER_MARKER and
1763 // makes _contentions negative as signals to contending threads that
1764 // an async deflation is in progress. There are a number of checks
1765 // as part of the protocol to make sure that the calling thread has
1766 // not lost the race to a contending thread.
1767 //
1768 // The ObjectMonitor has been successfully async deflated when:
1769 // (_owner == DEFLATER_MARKER && _contentions < 0). Contending threads
1770 // that see those values know to retry their operation.
1771 //
bool ObjectSynchronizer::deflate_monitor_using_JT(ObjectMonitor* mid,
                                                  ObjectMonitor** freeHeadp,
                                                  ObjectMonitor** freeTailp) {
  assert(AsyncDeflateIdleMonitors, "sanity check");
  assert(Thread::current()->is_Java_thread(), "precondition");
  // A newly allocated ObjectMonitor should not be seen here so we
  // avoid an endless inflate/deflate cycle.
  assert(mid->is_old(), "precondition");

  if (mid->is_busy() || mid->ref_count() != 0) {
    // Easy checks are first - the ObjectMonitor is busy or ObjectMonitor*
    // is in use so no deflation.
    return false;
  }

  // Atomic::cmpxchg() returns the field's previous value, so a NULL
  // result means we successfully installed DEFLATER_MARKER in _owner.
  if (Atomic::cmpxchg(DEFLATER_MARKER, &mid->_owner, (void*)NULL) == NULL) {
    // ObjectMonitor is not owned by another thread. Our setting
    // _owner to DEFLATER_MARKER forces any contending thread through
    // the slow path. This is just the first part of the async
    // deflation dance.

    if (mid->_waiters != 0 || mid->ref_count() != 0) {
      // Another thread has raced to enter the ObjectMonitor after
      // mid->is_busy() above and has already waited on it which
      // makes it busy so no deflation. Or the ObjectMonitor* is
      // in use for some other operation like inflate(). Restore
      // _owner to NULL if it is still DEFLATER_MARKER.
      Atomic::cmpxchg((void*)NULL, &mid->_owner, DEFLATER_MARKER);
      return false;
    }

    if (Atomic::cmpxchg(-max_jint, &mid->_contentions, (jint)0) == 0) {
      // Make _contentions negative to force racing threads to retry.
      // This is the second part of the async deflation dance.

      if (mid->_owner == DEFLATER_MARKER) {
        // If _owner is still DEFLATER_MARKER, then we have successfully
        // signaled any racing threads to retry. If it is not, then we
        // have lost the race to another thread and the ObjectMonitor is
        // now busy. This is the third and final part of the async
        // deflation dance.
        // Note: This _owner check solves the ABA problem with _contentions
        // where another thread acquired the ObjectMonitor, finished
        // using it and restored the _contentions to zero.

        // Sanity checks for the races:
        guarantee(mid->_waiters == 0, "should be no waiters");
        guarantee(mid->_cxq == NULL, "should be no contending threads");
        guarantee(mid->_EntryList == NULL, "should be no entering threads");

        if (log_is_enabled(Trace, monitorinflation)) {
          oop obj = (oop) mid->object();
          assert(obj != NULL, "sanity check");
          if (obj->is_instance()) {
            ResourceMark rm;
            log_trace(monitorinflation)("deflate_monitor_using_JT: "
                                        "object=" INTPTR_FORMAT ", mark=" INTPTR_FORMAT ", type='%s'",
                                        p2i(obj), p2i(obj->mark()),
                                        obj->klass()->external_name());
          }
        }

        // Install the old mark word if nobody else has already done it.
        mid->install_displaced_markword_in_object();
        mid->clear_using_JT();

        assert(mid->object() == NULL, "invariant");
        assert(mid->is_free(), "invariant");

        // Move the deflated ObjectMonitor to the working free list
        // defined by freeHeadp and freeTailp.
        if (*freeHeadp == NULL) {
          // First one on the list.
          *freeHeadp = mid;
        }
        if (*freeTailp != NULL) {
          // We append to the list so the caller can use mid->FreeNext
          // to fix the linkages in its context.
          ObjectMonitor * prevtail = *freeTailp;
          assert(prevtail->FreeNext == NULL, "not cleaned up by the caller");
          prevtail->FreeNext = mid;
        }
        *freeTailp = mid;

        // At this point, mid->FreeNext still refers to its current
        // value and another ObjectMonitor's FreeNext field still
        // refers to this ObjectMonitor. Those linkages have to be
        // cleaned up by the caller who has the complete context.

        // We leave _owner == DEFLATER_MARKER and _contentions < 0
        // to force any racing threads to retry.
        return true;  // Success, ObjectMonitor has been deflated.
      }

      // The _owner was changed from DEFLATER_MARKER so we lost the
      // race since the ObjectMonitor is now busy. Add back max_jint
      // to restore the _contentions field to its proper value (which
      // may not be what we saw above).
      Atomic::add(max_jint, &mid->_contentions);

      assert(mid->_contentions >= 0, "_contentions should not be negative");
    }

    // The _contentions was no longer 0 so we lost the race since the
    // ObjectMonitor is now busy.
    assert(mid->_owner != DEFLATER_MARKER, "should no longer be set");
  }

  // The _owner field is no longer NULL so we lost the race since the
  // ObjectMonitor is now busy.
  return false;
}
1884
1885 // Walk a given monitor list, and deflate idle monitors
1886 // The given list could be a per-thread list or a global list
1887 // Caller acquires gListLock as needed.
1888 //
1889 // In the case of parallel processing of thread local monitor lists,
1890 // work is done by Threads::parallel_threads_do() which ensures that
1891 // each Java thread is processed by exactly one worker thread, and
// thus avoids conflicts that would arise when worker threads would
1893 // process the same monitor lists concurrently.
1894 //
1895 // See also ParallelSPCleanupTask and
1896 // SafepointSynchronize::do_cleanup_tasks() in safepoint.cpp and
1897 // Threads::parallel_java_threads_do() in thread.cpp.
1898 int ObjectSynchronizer::deflate_monitor_list(ObjectMonitor** listHeadp,
1899 ObjectMonitor** freeHeadp,
1900 ObjectMonitor** freeTailp) {
1957 // Only try to deflate if there is an associated Java object and if
1958 // mid is old (is not newly allocated and is not newly freed).
1959 if (mid->object() != NULL && mid->is_old() &&
1960 deflate_monitor_using_JT(mid, freeHeadp, freeTailp)) {
1961 // Deflation succeeded so update the in-use list.
1962 if (mid == *listHeadp) {
1963 *listHeadp = mid->FreeNext;
1964 } else if (cur_mid_in_use != NULL) {
1965 // Maintain the current in-use list.
1966 cur_mid_in_use->FreeNext = mid->FreeNext;
1967 }
1968 next = mid->FreeNext;
1969 mid->FreeNext = NULL;
1970 // At this point mid is disconnected from the in-use list
1971 // and is the current tail in the freeHeadp list.
1972 mid = next;
1973 deflated_count++;
1974 } else {
1975 // mid is considered in-use if it does not have an associated
1976 // Java object or mid is not old or deflation did not succeed.
1977 // A mid->is_new() node can be seen here when it is freshly returned
1978 // by omAlloc() (and skips the deflation code path).
1979 // A mid->is_old() node can be seen here when deflation failed.
1980 // A mid->is_free() node can be seen here when a fresh node from
1981 // omAlloc() is released by omRelease() due to losing the race
1982 // in inflate().
1983
1984 if (mid->object() != NULL && mid->is_new()) {
1985 // mid has an associated Java object and has now been seen
1986 // as newly allocated so mark it as "old".
1987 mid->set_allocation_state(ObjectMonitor::Old);
1988 }
1989 cur_mid_in_use = mid;
1990 mid = mid->FreeNext;
1991
1992 if (SafepointSynchronize::is_synchronizing() &&
1993 cur_mid_in_use != *listHeadp && cur_mid_in_use->is_old()) {
1994 // If a safepoint has started and cur_mid_in_use is not the list
1995 // head and is old, then it is safe to use as saved state. Return
1996 // to the caller so gListLock can be dropped as appropriate
1997 // before blocking.
1998 *savedMidInUsep = cur_mid_in_use;
1999 return deflated_count;
2000 }
2001 }
2002 }
2003 // We finished the list without a safepoint starting so there's
2004 // no need to save state.
2005 *savedMidInUsep = NULL;
2006 return deflated_count;
2007 }
2008
2009 void ObjectSynchronizer::prepare_deflate_idle_monitors(DeflateMonitorCounters* counters) {
2010 counters->nInuse = 0; // currently associated with objects
2011 counters->nInCirculation = 0; // extant
2012 counters->nScavenged = 0; // reclaimed (global and per-thread)
2013 counters->perThreadScavenged = 0; // per-thread scavenge total
2014 counters->perThreadTimes = 0.0; // per-thread scavenge times
2015 }
2016
2017 void ObjectSynchronizer::deflate_idle_monitors(DeflateMonitorCounters* counters) {
2018 assert(!AsyncDeflateIdleMonitors, "sanity check");
2019 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
2020 bool deflated = false;
2021
2022 ObjectMonitor * freeHeadp = NULL; // Local SLL of scavenged monitors
2023 ObjectMonitor * freeTailp = NULL;
2024 elapsedTimer timer;
2025
2026 if (log_is_enabled(Info, monitorinflation)) {
2027 timer.start();
2028 }
2029
2030 // Prevent omFlush from changing mids in Thread dtor's during deflation
2031 // And in case the vm thread is acquiring a lock during a safepoint
2032 // See e.g. 6320749
2033 Thread::muxAcquire(&gListLock, "deflate_idle_monitors");
2034
2035 // Note: the thread-local monitors lists get deflated in
2036 // a separate pass. See deflate_thread_local_monitors().
2037
2038 // For moribund threads, scan gOmInUseList
2039 int deflated_count = 0;
2057 timer.stop();
2058
2059 LogStreamHandle(Debug, monitorinflation) lsh_debug;
2060 LogStreamHandle(Info, monitorinflation) lsh_info;
2061 LogStream * ls = NULL;
2062 if (log_is_enabled(Debug, monitorinflation)) {
2063 ls = &lsh_debug;
2064 } else if (deflated_count != 0 && log_is_enabled(Info, monitorinflation)) {
2065 ls = &lsh_info;
2066 }
2067 if (ls != NULL) {
2068 ls->print_cr("deflating global idle monitors, %3.7f secs, %d monitors", timer.seconds(), deflated_count);
2069 }
2070 }
2071
// Deflate global idle ObjectMonitors using a JavaThread.
// Walks gOmInUseList under gListLock, deflating idle monitors via
// deflate_monitor_list_using_JT() and splicing the scavenged ones onto
// gFreeList. If a safepoint starts mid-scan, the lock is dropped, the
// thread blocks for the safepoint, and the scan resumes from saved state.
//
void ObjectSynchronizer::deflate_global_idle_monitors_using_JT() {
  assert(AsyncDeflateIdleMonitors, "sanity check");
  assert(Thread::current()->is_Java_thread(), "precondition");
  JavaThread * cur_jt = JavaThread::current();

  // Clear the request flag set by do_safepoint_work().
  _gOmShouldDeflateIdleMonitors = false;

  int deflated_count = 0;
  ObjectMonitor * freeHeadp = NULL;  // Local SLL of scavenged ObjectMonitors
  ObjectMonitor * freeTailp = NULL;
  ObjectMonitor * savedMidInUsep = NULL;
  elapsedTimer timer;

  // Timing is only gathered when it can be reported.
  if (log_is_enabled(Info, monitorinflation)) {
    timer.start();
  }
  Thread::muxAcquire(&gListLock, "deflate_global_idle_monitors_using_JT(1)");
  OM_PERFDATA_OP(MonExtant, set_value(gOmInUseCount));

  do {
    int local_deflated_count = deflate_monitor_list_using_JT((ObjectMonitor **)&gOmInUseList, &freeHeadp, &freeTailp, &savedMidInUsep);
    gOmInUseCount -= local_deflated_count;
    deflated_count += local_deflated_count;

    if (freeHeadp != NULL) {
      // Move the scavenged ObjectMonitors to the global free list.
      guarantee(freeTailp != NULL && local_deflated_count > 0, "freeTailp=" INTPTR_FORMAT ", local_deflated_count=%d", p2i(freeTailp), local_deflated_count);
      assert(freeTailp->FreeNext == NULL, "invariant");

      // Constant-time list splice - prepend scavenged segment to gFreeList.
      freeTailp->FreeNext = gFreeList;
      gFreeList = freeHeadp;

      gMonitorFreeCount += local_deflated_count;
      OM_PERFDATA_OP(Deflations, inc(local_deflated_count));
    }

    if (savedMidInUsep != NULL) {
      // deflate_monitor_list_using_JT() detected a safepoint starting.
      // Drop gListLock and pause the timer while blocked at the safepoint.
      Thread::muxRelease(&gListLock);
      timer.stop();
      {
        log_debug(monitorinflation)("pausing deflation of global idle monitors for a safepoint.");
        assert(SafepointSynchronize::is_synchronizing(), "sanity check");
        ThreadBlockInVM blocker(cur_jt);
      }
      // Prepare for another loop after the safepoint.
      freeHeadp = NULL;
      freeTailp = NULL;
      if (log_is_enabled(Info, monitorinflation)) {
        timer.start();
      }
      Thread::muxAcquire(&gListLock, "deflate_global_idle_monitors_using_JT(2)");
    }
  } while (savedMidInUsep != NULL);
  Thread::muxRelease(&gListLock);
  timer.stop();

  // Report at Debug level always; at Info level only when work was done.
  LogStreamHandle(Debug, monitorinflation) lsh_debug;
  LogStreamHandle(Info, monitorinflation) lsh_info;
  LogStream * ls = NULL;
  if (log_is_enabled(Debug, monitorinflation)) {
    ls = &lsh_debug;
  } else if (deflated_count != 0 && log_is_enabled(Info, monitorinflation)) {
    ls = &lsh_info;
  }
  if (ls != NULL) {
    ls->print_cr("async-deflating global idle monitors, %3.7f secs, %d monitors", timer.seconds(), deflated_count);
  }
}
2144
// Deflate per-thread idle ObjectMonitors using a JavaThread.
// Like deflate_global_idle_monitors_using_JT(), but scans the calling
// JavaThread's own omInUseList; gListLock is only needed for the brief
// splice of scavenged monitors onto the global free list.
//
void ObjectSynchronizer::deflate_per_thread_idle_monitors_using_JT() {
  assert(AsyncDeflateIdleMonitors, "sanity check");
  assert(Thread::current()->is_Java_thread(), "precondition");
  JavaThread * cur_jt = JavaThread::current();

  // Clear the request flag set by do_safepoint_work().
  cur_jt->omShouldDeflateIdleMonitors = false;

  int deflated_count = 0;
  ObjectMonitor * freeHeadp = NULL;  // Local SLL of scavenged ObjectMonitors
  ObjectMonitor * freeTailp = NULL;
  ObjectMonitor * savedMidInUsep = NULL;
  elapsedTimer timer;

  // Timing is only gathered when it can be reported.
  if (log_is_enabled(Info, monitorinflation)) {
    timer.start();
  }

  // inc() rather than set_value(): multiple threads contribute here.
  OM_PERFDATA_OP(MonExtant, inc(cur_jt->omInUseCount));
  do {
    int local_deflated_count = deflate_monitor_list_using_JT(cur_jt->omInUseList_addr(), &freeHeadp, &freeTailp, &savedMidInUsep);
    cur_jt->omInUseCount -= local_deflated_count;
    deflated_count += local_deflated_count;

    if (freeHeadp != NULL) {
      // Move the scavenged ObjectMonitors to the global free list.
      Thread::muxAcquire(&gListLock, "deflate_per_thread_idle_monitors_using_JT");
      guarantee(freeTailp != NULL && local_deflated_count > 0, "freeTailp=" INTPTR_FORMAT ", local_deflated_count=%d", p2i(freeTailp), local_deflated_count);
      assert(freeTailp->FreeNext == NULL, "invariant");

      // Constant-time list splice - prepend scavenged segment to gFreeList.
      freeTailp->FreeNext = gFreeList;
      gFreeList = freeHeadp;

      gMonitorFreeCount += local_deflated_count;
      OM_PERFDATA_OP(Deflations, inc(local_deflated_count));
      Thread::muxRelease(&gListLock);
      // Prepare for another loop on the current JavaThread.
      freeHeadp = NULL;
      freeTailp = NULL;
    }
    // Pause the timer; it is restarted below if we block for a safepoint
    // and loop again.
    timer.stop();

    if (savedMidInUsep != NULL) {
      // deflate_monitor_list_using_JT() detected a safepoint starting.
      {
        log_debug(monitorinflation)("jt=" INTPTR_FORMAT ": pausing deflation of per-thread idle monitors for a safepoint.", p2i(cur_jt));
        assert(SafepointSynchronize::is_synchronizing(), "sanity check");
        ThreadBlockInVM blocker(cur_jt);
      }
      // Prepare for another loop on the current JavaThread after
      // the safepoint.
      if (log_is_enabled(Info, monitorinflation)) {
        timer.start();
      }
    }
  } while (savedMidInUsep != NULL);

  // Report at Debug level always; at Info level only when work was done.
  LogStreamHandle(Debug, monitorinflation) lsh_debug;
  LogStreamHandle(Info, monitorinflation) lsh_info;
  LogStream * ls = NULL;
  if (log_is_enabled(Debug, monitorinflation)) {
    ls = &lsh_debug;
  } else if (deflated_count != 0 && log_is_enabled(Info, monitorinflation)) {
    ls = &lsh_info;
  }
  if (ls != NULL) {
    ls->print_cr("jt=" INTPTR_FORMAT ": async-deflating per-thread idle monitors, %3.7f secs, %d monitors", p2i(cur_jt), timer.seconds(), deflated_count);
  }
}
2216
// Finish a deflation pass: report per-thread results, publish the
// counters (when they were used), and reset the STW bookkeeping.
2217 void ObjectSynchronizer::finish_deflate_idle_monitors(DeflateMonitorCounters* counters) {
2218 // Report the cumulative time for deflating each thread's idle
2219 // monitors. Note: if the work is split among more than one
2220 // worker thread, then the reported time will likely be more
2221 // than a beginning to end measurement of the phase.
2222 // Note: AsyncDeflateIdleMonitors only deflates per-thread idle
2223 // monitors at a safepoint when a special cleanup has been requested.
2224 log_info(safepoint, cleanup)("deflating per-thread idle monitors, %3.7f secs, monitors=%d", counters->perThreadTimes, counters->perThreadScavenged);
2225
2226 bool needs_special_cleanup = is_cleanup_requested();
2227 if (!AsyncDeflateIdleMonitors || needs_special_cleanup) {
2228 // AsyncDeflateIdleMonitors does not use these counters unless
2229 // there is a special cleanup request.
2230
2231 gMonitorFreeCount += counters->nScavenged; // account for monitors scavenged during this pass
2232
2233 OM_PERFDATA_OP(Deflations, inc(counters->nScavenged));
2241 } else if (log_is_enabled(Info, monitorinflation)) {
2242 Thread::muxAcquire(&gListLock, "finish_deflate_idle_monitors"); // gListLock guards the g* counters read below
2243 log_info(monitorinflation)("gMonitorPopulation=%d, gOmInUseCount=%d, "
2244 "gMonitorFreeCount=%d", gMonitorPopulation,
2245 gOmInUseCount, gMonitorFreeCount);
2246 Thread::muxRelease(&gListLock);
2247 }
2248
2249 ForceMonitorScavenge = 0; // Reset
2250 GVars.stwRandom = os::random(); // fresh random value for the next STW cycle
2251 GVars.stwCycle++;
2252 if (needs_special_cleanup) {
2253 set_is_cleanup_requested(false); // special clean up is done
2254 }
2255 }
2256
2257 void ObjectSynchronizer::deflate_thread_local_monitors(Thread* thread, DeflateMonitorCounters* counters) {
2258 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
2259
2260 if (AsyncDeflateIdleMonitors) {
2261 // Nothing to do when idle ObjectMonitors are deflated using a
2262 // JavaThread unless a special cleanup has been requested.
2263 if (!is_cleanup_requested()) {
2264 return;
2265 }
2266 }
2267
2268 ObjectMonitor * freeHeadp = NULL; // Local SLL of scavenged monitors
2269 ObjectMonitor * freeTailp = NULL;
2270 elapsedTimer timer;
2271
2272 if (log_is_enabled(Info, safepoint, cleanup) ||
2273 log_is_enabled(Info, monitorinflation)) {
2274 timer.start();
2275 }
2276
2277 int deflated_count = deflate_monitor_list(thread->omInUseList_addr(), &freeHeadp, &freeTailp);
2278
2279 Thread::muxAcquire(&gListLock, "deflate_thread_local_monitors");
2280
2281 // Adjust counters
2282 counters->nInCirculation += thread->omInUseCount;
2283 thread->omInUseCount -= deflated_count;
2474 void ObjectSynchronizer::chk_free_entry(JavaThread * jt, ObjectMonitor * n,
2475 outputStream * out, int *error_cnt_p) {
2476 if ((!AsyncDeflateIdleMonitors && n->is_busy()) ||
2477 (AsyncDeflateIdleMonitors && n->is_busy_async())) {
2478 if (jt != NULL) {
2479 out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
2480 ": free per-thread monitor must not be busy.", p2i(jt),
2481 p2i(n));
2482 } else {
2483 out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": free global monitor "
2484 "must not be busy.", p2i(n));
2485 }
2486 *error_cnt_p = *error_cnt_p + 1;
2487 }
2488 if (n->header() != NULL) {
2489 if (jt != NULL) {
2490 out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
2491 ": free per-thread monitor must have NULL _header "
2492 "field: _header=" INTPTR_FORMAT, p2i(jt), p2i(n),
2493 p2i(n->header()));
2494 } else {
2495 out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": free global monitor "
2496 "must have NULL _header field: _header=" INTPTR_FORMAT,
2497 p2i(n), p2i(n->header()));
2498 }
2499 *error_cnt_p = *error_cnt_p + 1;
2500 }
2501 if (n->object() != NULL) {
2502 if (jt != NULL) {
2503 out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
2504 ": free per-thread monitor must have NULL _object "
2505 "field: _object=" INTPTR_FORMAT, p2i(jt), p2i(n),
2506 p2i(n->object()));
2507 } else {
2508 out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": free global monitor "
2509 "must have NULL _object field: _object=" INTPTR_FORMAT,
2510 p2i(n), p2i(n->object()));
2511 }
2512 *error_cnt_p = *error_cnt_p + 1;
2513 }
2514 }
2515
2516 // Check the global free list and count; log the results of the checks.
2517 void ObjectSynchronizer::chk_global_free_list_and_count(outputStream * out,
2518 int *error_cnt_p) {
2519 int chkMonitorFreeCount = 0;
|
731 Handle hobj(Self, obj);
732 // Relaxing assertion for bug 6320749.
733 assert(Universe::verify_in_progress() ||
734 !SafepointSynchronize::is_at_safepoint(),
735 "biases should not be seen by VM thread here");
736 BiasedLocking::revoke_and_rebias(hobj, false, JavaThread::current());
737 obj = hobj();
738 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
739 }
740 }
741
742 // hashCode() is a heap mutator ...
743 // Relaxing assertion for bug 6320749.
744 assert(Universe::verify_in_progress() || DumpSharedSpaces ||
745 !SafepointSynchronize::is_at_safepoint(), "invariant");
746 assert(Universe::verify_in_progress() || DumpSharedSpaces ||
747 Self->is_Java_thread() , "invariant");
748 assert(Universe::verify_in_progress() || DumpSharedSpaces ||
749 ((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant");
750
751 while (true) {
752 ObjectMonitor* monitor = NULL;
753 markOop temp, test;
754 intptr_t hash;
755 markOop mark = ReadStableMark(obj);
756
757 // object should remain ineligible for biased locking
758 assert(!mark->has_bias_pattern(), "invariant");
759
760 if (mark->is_neutral()) {
761 hash = mark->hash(); // this is a normal header
762 if (hash != 0) { // if it has hash, just return it
763 return hash;
764 }
765 hash = get_next_hash(Self, obj); // allocate a new hash code
766 temp = mark->copy_set_hash(hash); // merge the hash code into header
767 // use (machine word version) atomic operation to install the hash
768 test = obj->cas_set_mark(temp, mark);
769 if (test == mark) {
770 return hash;
771 }
772 // If atomic operation failed, we must inflate the header
773 // into heavy weight monitor. We could add more code here
774 // for fast path, but it does not worth the complexity.
775 } else if (mark->has_monitor()) {
776 ObjectMonitorHandle omh;
777 if (!omh.save_om_ptr(obj, mark)) {
778 // Lost a race with async deflation so try again.
779 assert(AsyncDeflateIdleMonitors, "sanity check");
780 continue;
781 }
782 monitor = omh.om_ptr();
783 temp = monitor->header();
784 assert(temp->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i(temp));
785 hash = temp->hash();
786 if (hash != 0) {
787 return hash;
788 }
789 // Skip to the following code to reduce code size
790 } else if (Self->is_lock_owned((address)mark->locker())) {
791 temp = mark->displaced_mark_helper(); // this is a lightweight monitor owned
792 assert(temp->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i(temp));
793 hash = temp->hash(); // by current thread, check if the displaced
794 if (hash != 0) { // header contains hash code
795 return hash;
796 }
797 // WARNING:
798 // The displaced header in the BasicLock on a thread's stack
799 // is strictly immutable. It CANNOT be changed in ANY cases.
800 // So we have to inflate the stack lock into an ObjectMonitor
801 // even if the current thread owns the lock. The BasicLock on
802 // a thread's stack can be asynchronously read by other threads
803 // during an inflate() call so any change to that stack memory
804 // may not propagate to other threads correctly.
805 }
806
807 // Inflate the monitor to set hash code
808 ObjectMonitorHandle omh;
809 inflate(&omh, Self, obj, inflate_cause_hash_code);
810 monitor = omh.om_ptr();
811 // Load displaced header and check it has hash code
812 mark = monitor->header();
813 assert(mark->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i(mark));
814 hash = mark->hash();
815 if (hash == 0) {
816 hash = get_next_hash(Self, obj);
817 temp = mark->copy_set_hash(hash); // merge hash code into header
818 assert(temp->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i(temp));
819 test = Atomic::cmpxchg(temp, monitor->header_addr(), mark);
820 if (test != mark) {
821 // The only non-deflation update to the ObjectMonitor's
822 // header/dmw field is to merge in the hash code. If someone
823 // adds a new usage of the header/dmw field, please update
824 // this code.
825 // ObjectMonitor::install_displaced_markword_in_object()
826 // does mark the header/dmw field as part of async deflation,
827 // but that protocol cannot happen now due to the
828 // ObjectMonitorHandle above.
829 hash = test->hash();
830 assert(test->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i(test));
831 assert(hash != 0, "Trivial unexpected object/monitor header usage.");
832 }
833 }
834 // We finally get the hash
835 return hash;
836 }
837 }
838
839 // Deprecated -- use FastHashCode() instead.
840
841 intptr_t ObjectSynchronizer::identity_hash_value_for(Handle obj) {
842 return FastHashCode(Thread::current(), obj());
843 }
844
845
846 bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* thread,
847 Handle h_obj) {
848 if (UseBiasedLocking) {
849 BiasedLocking::revoke_and_rebias(h_obj, false, thread);
850 assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now");
851 }
852
853 assert(thread == JavaThread::current(), "Can only be called on current thread");
854 oop obj = h_obj();
855
856 while (true) {
1154 Self->omInUseCount++;
1155 return m;
1156 }
1157
1158 // 2: try to allocate from the global gFreeList
1159 // CONSIDER: use muxTry() instead of muxAcquire().
1160 // If the muxTry() fails then drop immediately into case 3.
1161 // If we're using thread-local free lists then try
1162 // to reprovision the caller's free list.
1163 if (gFreeList != NULL) {
1164 // Reprovision the thread's omFreeList.
1165 // Use bulk transfers to reduce the allocation rate and heat
1166 // on various locks.
1167 Thread::muxAcquire(&gListLock, "omAlloc(1)");
1168 for (int i = Self->omFreeProvision; --i >= 0 && gFreeList != NULL;) {
1169 gMonitorFreeCount--;
1170 ObjectMonitor * take = gFreeList;
1171 gFreeList = take->FreeNext;
1172 guarantee(take->object() == NULL, "invariant");
1173 if (AsyncDeflateIdleMonitors) {
1174 // Clear any values we allowed to linger during async deflation.
1175 take->_header = NULL;
1176 take->set_owner(NULL);
1177 take->_contentions = 0;
1178 }
1179 guarantee(!take->is_busy(), "invariant");
1180 take->Recycle();
1181 assert(take->is_free(), "invariant");
1182 omRelease(Self, take, false);
1183 }
1184 Thread::muxRelease(&gListLock);
1185 Self->omFreeProvision += 1 + (Self->omFreeProvision/2);
1186 if (Self->omFreeProvision > MAXPRIVATE) Self->omFreeProvision = MAXPRIVATE;
1187
1188 const int mx = MonitorBound;
1189 if (mx > 0 && (gMonitorPopulation-gMonitorFreeCount) > mx) {
1190 // We can't safely induce a STW safepoint from omAlloc() as our thread
1191 // state may not be appropriate for such activities and callers may hold
1192 // naked oops, so instead we defer the action.
1193 InduceScavenge(Self, "omAlloc");
1194 }
1195 continue;
1335 // run at the same time as omFlush() so we have to be careful.
1336
1337 void ObjectSynchronizer::omFlush(Thread * Self) {
1338 ObjectMonitor * list = Self->omFreeList; // Null-terminated SLL
1339 ObjectMonitor * tail = NULL;
1340 int tally = 0;
1341 if (list != NULL) {
1342 ObjectMonitor * s;
1343 // The thread is going away, the per-thread free monitors
1344 // are freed via set_owner(NULL)
1345 // Link them to tail, which will be linked into the global free list
1346 // gFreeList below, under the gListLock
1347 for (s = list; s != NULL; s = s->FreeNext) {
1348 tally++;
1349 tail = s;
1350 guarantee(s->object() == NULL, "invariant");
1351 guarantee(!s->is_busy(), "invariant");
1352 s->set_owner(NULL); // redundant but good hygiene
1353 }
1354 guarantee(tail != NULL, "invariant");
1355 ADIM_guarantee(Self->omFreeCount == tally, "free-count off");
1356 Self->omFreeList = NULL;
1357 Self->omFreeCount = 0;
1358 }
1359
1360 ObjectMonitor * inUseList = Self->omInUseList;
1361 ObjectMonitor * inUseTail = NULL;
1362 int inUseTally = 0;
1363 if (inUseList != NULL) {
1364 ObjectMonitor *cur_om;
1365 // The thread is going away, however the omInUseList inflated
1366 // monitors may still be in-use by other threads.
1367 // Link them to inUseTail, which will be linked into the global in-use list
1368 // gOmInUseList below, under the gListLock
1369 for (cur_om = inUseList; cur_om != NULL; cur_om = cur_om->FreeNext) {
1370 inUseTail = cur_om;
1371 inUseTally++;
1372 ADIM_guarantee(cur_om->is_active(), "invariant");
1373 }
1374 guarantee(inUseTail != NULL, "invariant");
1375 ADIM_guarantee(Self->omInUseCount == inUseTally, "in-use count off");
1376 Self->omInUseList = NULL;
1377 Self->omInUseCount = 0;
1378 }
1379
1380 Thread::muxAcquire(&gListLock, "omFlush");
1381 if (tail != NULL) {
1382 tail->FreeNext = gFreeList;
1383 gFreeList = list;
1384 gMonitorFreeCount += tally;
1385 }
1386
1387 if (inUseTail != NULL) {
1388 inUseTail->FreeNext = gOmInUseList;
1389 gOmInUseList = inUseList;
1390 gOmInUseCount += inUseTally;
1391 }
1392
1393 Thread::muxRelease(&gListLock);
1394
1395 LogStreamHandle(Debug, monitorinflation) lsh_debug;
1550 // value from the basiclock on the owner's stack to the objectMonitor, all
1551 // the while preserving the hashCode stability invariants. If the owner
1552 // decides to release the lock while the value is 0, the unlock will fail
1553 // and control will eventually pass from slow_exit() to inflate. The owner
1554 // will then spin, waiting for the 0 value to disappear. Put another way,
1555 // the 0 causes the owner to stall if the owner happens to try to
1556 // drop the lock (restoring the header from the basiclock to the object)
1557 // while inflation is in-progress. This protocol avoids races that
1558 // would otherwise permit hashCode values to change or "flicker" for an object.
1559 // Critically, while object->mark is 0 mark->displaced_mark_helper() is stable.
1560 // 0 serves as a "BUSY" inflate-in-progress indicator.
1561
1562
1563 // fetch the displaced mark from the owner's stack.
1564 // The owner can't die or unwind past the lock while our INFLATING
1565 // object is in the mark. Furthermore the owner can't complete
1566 // an unlock on the object, either.
1567 markOop dmw = mark->displaced_mark_helper();
1568 // Catch if the object's header is not neutral (not locked and
1569 // not marked is what we care about here).
1570 ADIM_guarantee(dmw->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i(dmw));
1571
1572 // Setup monitor fields to proper values -- prepare the monitor
1573 m->set_header(dmw);
1574
1575 // Optimization: if the mark->locker stack address is associated
1576 // with this thread we could simply set m->_owner = Self.
1577 // Note that a thread can inflate an object
1578 // that it has stack-locked -- as might happen in wait() -- directly
1579 // with CAS. That is, we can avoid the xchg-NULL .... ST idiom.
1580 m->set_owner(mark->locker());
1581 m->set_object(object);
1582 // TODO-FIXME: assert BasicLock->dhw != 0.
1583
1584 omh_p->set_om_ptr(m);
1585 assert(m->is_new(), "freshly allocated monitor must be new");
1586 m->set_allocation_state(ObjectMonitor::Old);
1587
1588 // Must preserve store ordering. The monitor state must
1589 // be stable at the time of publishing the monitor address.
1590 guarantee(object->mark() == markOopDesc::INFLATING(), "invariant");
1591 object->release_set_mark(markOopDesc::encode(m));
1592
1593 // Hopefully the performance counters are allocated on distinct cache lines
1594 // to avoid false sharing on MP systems ...
1595 OM_PERFDATA_OP(Inflations, inc());
1596 if (log_is_enabled(Trace, monitorinflation)) {
1597 ResourceMark rm(Self);
1598 lsh.print_cr("inflate(has_locker): object=" INTPTR_FORMAT ", mark="
1599 INTPTR_FORMAT ", type='%s'", p2i(object),
1600 p2i(object->mark()), object->klass()->external_name());
1601 }
1602 if (event.should_commit()) {
1603 post_monitor_inflate_event(&event, object, cause);
1604 }
1605 ADIM_guarantee(!m->is_free(), "inflated monitor to be returned cannot be free");
1606 return;
1607 }
1608
1609 // CASE: neutral
1610 // TODO-FIXME: for entry we currently inflate and then try to CAS _owner.
1611 // If we know we're inflating for entry it's better to inflate by swinging a
1612 // pre-locked objectMonitor pointer into the object header. A successful
1613 // CAS inflates the object *and* confers ownership to the inflating thread.
1614 // In the current implementation we use a 2-step mechanism where we CAS()
1615 // to inflate and then CAS() again to try to swing _owner from NULL to Self.
1616 // An inflateTry() method that we could call from fast_enter() and slow_enter()
1617 // would be useful.
1618
1619 // Catch if the object's header is not neutral (not locked and
1620 // not marked is what we care about here).
1621 ADIM_guarantee(mark->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i(mark));
1622 ObjectMonitor * m;
1623 if (!AsyncDeflateIdleMonitors || cause == inflate_cause_vm_internal) {
1624 // If !AsyncDeflateIdleMonitors or if an internal inflation, then
1625 // we won't stop for a potential safepoint in omAlloc.
1626 m = omAlloc(Self, cause);
1627 } else {
1628 // If AsyncDeflateIdleMonitors and not an internal inflation, then
1629 // we may stop for a safepoint in omAlloc() so protect object.
1630 Handle h_obj(Self, object);
1631 m = omAlloc(Self, cause);
1632 object = h_obj(); // Refresh object.
1633 }
1634 // prepare m for installation - set monitor to initial state
1635 m->Recycle();
1636 m->set_header(mark);
1637 m->set_owner(NULL);
1638 m->set_object(object);
1639 m->_recursions = 0;
1640 m->_Responsible = NULL;
1641 m->_SpinDuration = ObjectMonitor::Knob_SpinLimit; // consider: keep metastats by type/class
1642
1643 omh_p->set_om_ptr(m);
1644 assert(m->is_new(), "freshly allocated monitor must be new");
1645 m->set_allocation_state(ObjectMonitor::Old);
1646
1647 if (object->cas_set_mark(markOopDesc::encode(m), mark) != mark) {
1648 m->set_header(NULL);
1649 m->set_object(NULL);
1650 m->Recycle();
1651 omh_p->set_om_ptr(NULL);
1652 // omRelease() will reset the allocation state
1653 omRelease(Self, m, true);
1654 m = NULL;
1655 continue;
1656 // interference - the markword changed - just retry.
1657 // The state-transitions are one-way, so there's no chance of
1658 // live-lock -- "Inflated" is an absorbing state.
1659 }
1660
1661 // Hopefully the performance counters are allocated on distinct
1662 // cache lines to avoid false sharing on MP systems ...
1663 OM_PERFDATA_OP(Inflations, inc());
1664 if (log_is_enabled(Trace, monitorinflation)) {
1665 ResourceMark rm(Self);
1666 lsh.print_cr("inflate(neutral): object=" INTPTR_FORMAT ", mark="
1667 INTPTR_FORMAT ", type='%s'", p2i(object),
1668 p2i(object->mark()), object->klass()->external_name());
1669 }
1670 if (event.should_commit()) {
1671 post_monitor_inflate_event(&event, object, cause);
1672 }
1673 ADIM_guarantee(!m->is_free(), "inflated monitor to be returned cannot be free");
1674 return;
1675 }
1676 }
1677
1678
1679 // We maintain a list of in-use monitors for each thread.
1680 //
1681 // deflate_thread_local_monitors() scans a single thread's in-use list, while
1682 // deflate_idle_monitors() scans only a global list of in-use monitors which
1683 // is populated only as a thread dies (see omFlush()).
1684 //
1685 // These operations are called at all safepoints, immediately after mutators
1686 // are stopped, but before any objects have moved. Collectively they traverse
1687 // the population of in-use monitors, deflating where possible. The scavenged
1688 // monitors are returned to the global monitor free list.
1689 //
1690 // Beware that we scavenge at *every* stop-the-world point. Having a large
1691 // number of monitors in-use could negatively impact performance. We also want
1692 // to minimize the total # of monitors in circulation, as they incur a small
1693 // footprint penalty.
1694 //
1695 // Perversely, the heap size -- and thus the STW safepoint rate --
1696 // typically drives the scavenge rate. Large heaps can mean infrequent GC,
1697 // which in turn can mean large(r) numbers of ObjectMonitors in circulation.
1698 // This is an unfortunate aspect of this design.
1699
1700 void ObjectSynchronizer::do_safepoint_work(DeflateMonitorCounters* _counters) {
1701 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
1702
1703 // The per-thread in-use lists are handled in
1704 // ParallelSPCleanupThreadClosure::do_thread().
1705
1706 if (!AsyncDeflateIdleMonitors || is_cleanup_requested()) {
1707 // Use the older mechanism for the global in-use list or
1708 // if a special cleanup has been requested.
1709 ObjectSynchronizer::deflate_idle_monitors(_counters);
1710 return;
1711 }
1712
1713 log_debug(monitorinflation)("requesting deflation of idle monitors.");
1714 // Request deflation of global idle monitors by the ServiceThread:
1715 _gOmShouldDeflateIdleMonitors = true;
1716 MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag);
1717 Service_lock->notify_all();
1718 }
1719
1720 // Deflate a single monitor if not in-use
1721 // Return true if deflated, false if in-use
1722 bool ObjectSynchronizer::deflate_monitor(ObjectMonitor* mid, oop obj,
1723 ObjectMonitor** freeHeadp,
1724 ObjectMonitor** freeTailp) {
1725 bool deflated;
1726 // Normal case ... The monitor is associated with obj.
1727 const markOop mark = obj->mark();
1728 guarantee(mark == markOopDesc::encode(mid), "should match: mark="
1729 INTPTR_FORMAT ", encoded mid=" INTPTR_FORMAT, p2i(mark),
1730 p2i(markOopDesc::encode(mid)));
1731 // Make sure that mark->monitor() and markOopDesc::encode() agree:
1732 guarantee(mark->monitor() == mid, "should match: monitor()=" INTPTR_FORMAT
1733 ", mid=" INTPTR_FORMAT, p2i(mark->monitor()), p2i(mid));
1734 const markOop dmw = mid->header();
1735 guarantee(dmw->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i(dmw));
1736
1737 if (mid->is_busy()) {
1755 assert(mid->object() == NULL, "invariant: object=" INTPTR_FORMAT,
1756 p2i(mid->object()));
1757 assert(mid->is_free(), "invariant");
1758
1759 // Move the object to the working free list defined by freeHeadp, freeTailp
1760 if (*freeHeadp == NULL) *freeHeadp = mid;
1761 if (*freeTailp != NULL) {
1762 ObjectMonitor * prevtail = *freeTailp;
1763 assert(prevtail->FreeNext == NULL, "cleaned up deflated?");
1764 prevtail->FreeNext = mid;
1765 }
1766 *freeTailp = mid;
1767 deflated = true;
1768 }
1769 return deflated;
1770 }
1771
1772 // Deflate the specified ObjectMonitor if not in-use using a JavaThread.
1773 // Returns true if it was deflated and false otherwise.
1774 //
1775 // The async deflation protocol sets owner to DEFLATER_MARKER and
1776 // makes contentions negative as signals to contending threads that
1777 // an async deflation is in progress. There are a number of checks
1778 // as part of the protocol to make sure that the calling thread has
1779 // not lost the race to a contending thread.
1780 //
1781 // The ObjectMonitor has been successfully async deflated when:
1782 // (owner == DEFLATER_MARKER && contentions < 0). Contending threads
1783 // that see those values know to retry their operation.
1784 //
// mid - the idle ObjectMonitor to try to deflate; must be is_old().
// freeHeadp/freeTailp - head/tail of the caller's local list of
// scavenged ObjectMonitors; a successfully deflated mid is appended
// to that list (mid's own FreeNext linkage is left for the caller,
// who has the complete list context, to clean up).
1785 bool ObjectSynchronizer::deflate_monitor_using_JT(ObjectMonitor* mid,
1786 ObjectMonitor** freeHeadp,
1787 ObjectMonitor** freeTailp) {
1788 assert(AsyncDeflateIdleMonitors, "sanity check");
1789 assert(Thread::current()->is_Java_thread(), "precondition");
1790 // A newly allocated ObjectMonitor should not be seen here so we
1791 // avoid an endless inflate/deflate cycle.
1792 assert(mid->is_old(), "must be old: allocation_state=%d",
1793 (int) mid->allocation_state());
1794
1795 if (mid->is_busy() || mid->ref_count() != 0) {
1796 // Easy checks are first - the ObjectMonitor is busy or ObjectMonitor*
1797 // is in use so no deflation.
1798 return false;
1799 }
1800
1801 if (Atomic::replace_if_null(DEFLATER_MARKER, &(mid->_owner))) {
1802 // ObjectMonitor is not owned by another thread. Our setting
1803 // owner to DEFLATER_MARKER forces any contending thread through
1804 // the slow path. This is just the first part of the async
1805 // deflation dance.
1806
1807 if (mid->_waiters != 0 || mid->ref_count() != 0) {
1808 // Another thread has raced to enter the ObjectMonitor after
1809 // mid->is_busy() above and has already waited on it which
1810 // makes it busy so no deflation. Or the ObjectMonitor* is
1811 // in use for some other operation like inflate(). Restore
1812 // owner to NULL if it is still DEFLATER_MARKER.
1813 Atomic::cmpxchg((void*)NULL, &mid->_owner, DEFLATER_MARKER);
1814 return false;
1815 }
1816
1817 if (Atomic::cmpxchg(-max_jint, &mid->_contentions, (jint)0) == 0) {
1818 // Make contentions negative to force racing threads to retry.
1819 // This is the second part of the async deflation dance.
1820
1821 if (mid->_owner == DEFLATER_MARKER && mid->ref_count() == 0) {
1822 // If owner is still DEFLATER_MARKER, then we have successfully
1823 // signaled any racing threads to retry. If it is not, then we
1824 // have lost the race to an entering thread and the ObjectMonitor
1825 // is now busy. If the ObjectMonitor* is in use, then we have
1826 // lost that race. This is the third and final part of the async
1827 // deflation dance.
1828 // Note: This owner check solves the ABA problem with contentions
1829 // where another thread acquired the ObjectMonitor, finished
1830 // using it and restored the contentions to zero.
1831 // Note: This ref_count check solves the race with save_om_ptr()
1832 // where its ref_count increment happens after the first ref_count
1833 // check in this function and before contentions is made negative.
1834
1835 // Sanity checks for the races:
1836 guarantee(mid->_waiters == 0, "must be 0: waiters=%d", mid->_waiters);
1837 guarantee(mid->_cxq == NULL, "must be no contending threads: cxq="
1838 INTPTR_FORMAT, p2i(mid->_cxq));
1839 guarantee(mid->_EntryList == NULL,
1840 "must be no entering threads: EntryList=" INTPTR_FORMAT,
1841 p2i(mid->_EntryList));
1842
1843 const oop obj = (oop) mid->object();
1844 if (log_is_enabled(Trace, monitorinflation)) {
1845 ResourceMark rm;
1846 log_trace(monitorinflation)("deflate_monitor_using_JT: "
1847 "object=" INTPTR_FORMAT ", mark="
1848 INTPTR_FORMAT ", type='%s'",
1849 p2i(obj), p2i(obj->mark()),
1850 obj->klass()->external_name());
1851 }
1852
1853 // Install the old mark word if nobody else has already done it.
1854 mid->install_displaced_markword_in_object(obj);
1855 mid->clear_using_JT();
1856
1857 assert(mid->object() == NULL, "must be NULL: object=" INTPTR_FORMAT,
1858 p2i(mid->object()));
1859 assert(mid->is_free(), "must be free: allocation_state=%d",
1860 (int) mid->allocation_state());
1861
1862 // Move the deflated ObjectMonitor to the working free list
1863 // defined by freeHeadp and freeTailp.
1864 if (*freeHeadp == NULL) {
1865 // First one on the list.
1866 *freeHeadp = mid;
1867 }
1868 if (*freeTailp != NULL) {
1869 // We append to the list so the caller can use mid->FreeNext
1870 // to fix the linkages in its context.
1871 ObjectMonitor * prevtail = *freeTailp;
1872 // Should have been cleaned up by the caller:
1873 assert(prevtail->FreeNext == NULL, "must be NULL: FreeNext="
1874 INTPTR_FORMAT, p2i(prevtail->FreeNext));
1875 prevtail->FreeNext = mid;
1876 }
1877 *freeTailp = mid;
1878
1879 // At this point, mid->FreeNext still refers to its current
1880 // value and another ObjectMonitor's FreeNext field still
1881 // refers to this ObjectMonitor. Those linkages have to be
1882 // cleaned up by the caller who has the complete context.
1883
1884 // We leave owner == DEFLATER_MARKER and contentions < 0
1885 // to force any racing threads to retry.
1886 return true; // Success, ObjectMonitor has been deflated.
1887 }
1888
1889 // The owner was changed from DEFLATER_MARKER or ObjectMonitor*
1890 // is in use so we lost the race since the ObjectMonitor is now
1891 // busy.
1892
1893 // Restore owner to NULL if it is still DEFLATER_MARKER:
1894 Atomic::cmpxchg((void*)NULL, &mid->_owner, DEFLATER_MARKER);
1895
1896 // Add back max_jint to restore the contentions field to its
1897 // proper value (which may not be what we saw above):
1898 Atomic::add(max_jint, &mid->_contentions);
1899
1900 assert(mid->_contentions >= 0, "must not be negative: contentions=%d",
1901 mid->_contentions);
1902 }
1903
1904 // The contentions was no longer 0 so we lost the race since the
1905 // ObjectMonitor is now busy.
1906 assert(mid->_owner != DEFLATER_MARKER, "must not be DEFLATER_MARKER");
1907 }
1908
1909 // The owner field is no longer NULL so we lost the race since the
1910 // ObjectMonitor is now busy.
1911 return false;
1912 }
1913
1914 // Walk a given monitor list, and deflate idle monitors
1915 // The given list could be a per-thread list or a global list
1916 // Caller acquires gListLock as needed.
1917 //
1918 // In the case of parallel processing of thread local monitor lists,
1919 // work is done by Threads::parallel_threads_do() which ensures that
1920 // each Java thread is processed by exactly one worker thread, and
1921 // thus avoid conflicts that would arise when worker threads would
1922 // process the same monitor lists concurrently.
1923 //
1924 // See also ParallelSPCleanupTask and
1925 // SafepointSynchronize::do_cleanup_tasks() in safepoint.cpp and
1926 // Threads::parallel_java_threads_do() in thread.cpp.
int ObjectSynchronizer::deflate_monitor_list(ObjectMonitor** listHeadp,
                                             ObjectMonitor** freeHeadp,
                                             ObjectMonitor** freeTailp) {
    // NOTE(review): the declarations of mid, cur_mid_in_use, next,
    // deflated_count and savedMidInUsep are in code not visible in this
    // excerpt — confirm against the full function before relying on this.
    // Only try to deflate if there is an associated Java object and if
    // mid is old (is not newly allocated and is not newly freed).
    if (mid->object() != NULL && mid->is_old() &&
        deflate_monitor_using_JT(mid, freeHeadp, freeTailp)) {
      // Deflation succeeded so update the in-use list.
      if (mid == *listHeadp) {
        // mid was the list head; advance the head past it.
        *listHeadp = mid->FreeNext;
      } else if (cur_mid_in_use != NULL) {
        // Maintain the current in-use list.
        cur_mid_in_use->FreeNext = mid->FreeNext;
      }
      next = mid->FreeNext;
      mid->FreeNext = NULL;
      // At this point mid is disconnected from the in-use list
      // and is the current tail in the freeHeadp list.
      mid = next;
      deflated_count++;
    } else {
      // mid is considered in-use if it does not have an associated
      // Java object or mid is not old or deflation did not succeed.
      // A mid->is_new() node can be seen here when it is freshly
      // returned by omAlloc() (and skips the deflation code path).
      // A mid->is_old() node can be seen here when deflation failed.
      // A mid->is_free() node can be seen here when a fresh node from
      // omAlloc() is released by omRelease() due to losing the race
      // in inflate().

      // Remember this node so the in-use list can be relinked around
      // any node deflated on a later iteration.
      cur_mid_in_use = mid;
      mid = mid->FreeNext;

      if (SafepointSynchronize::is_synchronizing() &&
          cur_mid_in_use != *listHeadp && cur_mid_in_use->is_old()) {
        // If a safepoint has started and cur_mid_in_use is not the list
        // head and is old, then it is safe to use as saved state. Return
        // to the caller so gListLock can be dropped as appropriate
        // before blocking.
        *savedMidInUsep = cur_mid_in_use;
        return deflated_count;
      }
    }
  }
  // We finished the list without a safepoint starting so there's
  // no need to save state.
  *savedMidInUsep = NULL;
  return deflated_count;
}
2032
2033 void ObjectSynchronizer::prepare_deflate_idle_monitors(DeflateMonitorCounters* counters) {
2034 counters->nInuse = 0; // currently associated with objects
2035 counters->nInCirculation = 0; // extant
2036 counters->nScavenged = 0; // reclaimed (global and per-thread)
2037 counters->perThreadScavenged = 0; // per-thread scavenge total
2038 counters->perThreadTimes = 0.0; // per-thread scavenge times
2039 }
2040
void ObjectSynchronizer::deflate_idle_monitors(DeflateMonitorCounters* counters) {
  // Deflate idle global ObjectMonitors at a safepoint, accumulating
  // statistics into 'counters'. With AsyncDeflateIdleMonitors enabled this
  // is a no-op unless a special cleanup has been requested.
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");

  if (AsyncDeflateIdleMonitors) {
    // Nothing to do when global idle ObjectMonitors are deflated using
    // a JavaThread unless a special cleanup has been requested.
    if (!is_cleanup_requested()) {
      return;
    }
  }

  bool deflated = false;

  ObjectMonitor * freeHeadp = NULL;  // Local SLL of scavenged monitors
  ObjectMonitor * freeTailp = NULL;
  elapsedTimer timer;

  if (log_is_enabled(Info, monitorinflation)) {
    // Only pay for the timer when the result can actually be logged.
    timer.start();
  }

  // Prevent omFlush from changing mids in Thread dtor's during deflation
  // And in case the vm thread is acquiring a lock during a safepoint
  // See e.g. 6320749
  Thread::muxAcquire(&gListLock, "deflate_idle_monitors");

  // Note: the thread-local monitors lists get deflated in
  // a separate pass. See deflate_thread_local_monitors().

  // For moribund threads, scan gOmInUseList
  int deflated_count = 0;
  timer.stop();

  LogStreamHandle(Debug, monitorinflation) lsh_debug;
  LogStreamHandle(Info, monitorinflation) lsh_info;
  LogStream * ls = NULL;
  if (log_is_enabled(Debug, monitorinflation)) {
    ls = &lsh_debug;
  } else if (deflated_count != 0 && log_is_enabled(Info, monitorinflation)) {
    // At Info level, only report passes that actually deflated something.
    ls = &lsh_info;
  }
  if (ls != NULL) {
    ls->print_cr("deflating global idle monitors, %3.7f secs, %d monitors", timer.seconds(), deflated_count);
  }
}
2103
2104 // Deflate global idle ObjectMonitors using a JavaThread.
2105 //
2106 void ObjectSynchronizer::deflate_global_idle_monitors_using_JT() {
2107 assert(AsyncDeflateIdleMonitors, "sanity check");
2108 assert(Thread::current()->is_Java_thread(), "precondition");
2109 JavaThread * self = JavaThread::current();
2110
2111 _gOmShouldDeflateIdleMonitors = false;
2112
2113 deflate_common_idle_monitors_using_JT(true /* is_global */, self);
2114 }
2115
2116 // Deflate per-thread idle ObjectMonitors using a JavaThread.
2117 //
2118 void ObjectSynchronizer::deflate_per_thread_idle_monitors_using_JT() {
2119 assert(AsyncDeflateIdleMonitors, "sanity check");
2120 assert(Thread::current()->is_Java_thread(), "precondition");
2121 JavaThread * self = JavaThread::current();
2122
2123 self->omShouldDeflateIdleMonitors = false;
2124
2125 deflate_common_idle_monitors_using_JT(false /* !is_global */, self);
2126 }
2127
2128 // Deflate global or per-thread idle ObjectMonitors using a JavaThread.
2129 //
void ObjectSynchronizer::deflate_common_idle_monitors_using_JT(bool is_global, JavaThread * self) {
  // is_global == true:  deflate gOmInUseList (guarded by gListLock);
  // is_global == false: deflate self's per-thread omInUseList.
  // Scavenged ObjectMonitors are spliced onto gFreeList. If a safepoint
  // starts mid-scan, we block cooperatively and then resume the scan.
  int deflated_count = 0;
  ObjectMonitor * freeHeadp = NULL;  // Local SLL of scavenged ObjectMonitors
  ObjectMonitor * freeTailp = NULL;
  ObjectMonitor * savedMidInUsep = NULL;  // non-NULL => scan paused for a safepoint
  elapsedTimer timer;

  if (log_is_enabled(Info, monitorinflation)) {
    // Only pay for the timer when the result can actually be logged.
    timer.start();
  }

  if (is_global) {
    // gListLock protects the global in-use list for the whole scan.
    Thread::muxAcquire(&gListLock, "deflate_global_idle_monitors_using_JT(1)");
    OM_PERFDATA_OP(MonExtant, set_value(gOmInUseCount));
  } else {
    OM_PERFDATA_OP(MonExtant, inc(self->omInUseCount));
  }

  // Each pass deflates until the list is exhausted or a safepoint starts.
  do {
    int local_deflated_count;
    if (is_global) {
      local_deflated_count = deflate_monitor_list_using_JT((ObjectMonitor **)&gOmInUseList, &freeHeadp, &freeTailp, &savedMidInUsep);
      gOmInUseCount -= local_deflated_count;
    } else {
      local_deflated_count = deflate_monitor_list_using_JT(self->omInUseList_addr(), &freeHeadp, &freeTailp, &savedMidInUsep);
      self->omInUseCount -= local_deflated_count;
    }
    deflated_count += local_deflated_count;

    if (freeHeadp != NULL) {
      // Move the scavenged ObjectMonitors to the global free list.
      guarantee(freeTailp != NULL && local_deflated_count > 0, "freeTailp=" INTPTR_FORMAT ", local_deflated_count=%d", p2i(freeTailp), local_deflated_count);
      assert(freeTailp->FreeNext == NULL, "invariant");

      if (!is_global) {
        // Per-thread scan does not yet hold gListLock; take it for the splice.
        Thread::muxAcquire(&gListLock, "deflate_per_thread_idle_monitors_using_JT(2)");
      }
      // Constant-time list splice - prepend scavenged segment to gFreeList.
      freeTailp->FreeNext = gFreeList;
      gFreeList = freeHeadp;

      gMonitorFreeCount += local_deflated_count;
      OM_PERFDATA_OP(Deflations, inc(local_deflated_count));
      if (!is_global) {
        Thread::muxRelease(&gListLock);
      }
    }

    if (savedMidInUsep != NULL) {
      // deflate_monitor_list_using_JT() detected a safepoint starting.
      if (is_global) {
        // Drop gListLock before blocking so other threads can make progress.
        Thread::muxRelease(&gListLock);
      }
      timer.stop();
      {
        // Scope ensures 'blocker' is destructed (thread unblocked) before
        // gListLock is re-acquired below.
        if (is_global) {
          log_debug(monitorinflation)("pausing deflation of global idle monitors for a safepoint.");
        } else {
          log_debug(monitorinflation)("jt=" INTPTR_FORMAT ": pausing deflation of per-thread idle monitors for a safepoint.", p2i(self));
        }
        assert(SafepointSynchronize::is_synchronizing(), "sanity check");
        ThreadBlockInVM blocker(self);
      }
      // Prepare for another loop after the safepoint.
      freeHeadp = NULL;
      freeTailp = NULL;
      if (log_is_enabled(Info, monitorinflation)) {
        timer.start();
      }
      if (is_global) {
        Thread::muxAcquire(&gListLock, "deflate_global_idle_monitors_using_JT(3)");
      }
    }
  } while (savedMidInUsep != NULL);
  if (is_global) {
    Thread::muxRelease(&gListLock);
  }
  timer.stop();

  LogStreamHandle(Debug, monitorinflation) lsh_debug;
  LogStreamHandle(Info, monitorinflation) lsh_info;
  LogStream * ls = NULL;
  if (log_is_enabled(Debug, monitorinflation)) {
    ls = &lsh_debug;
  } else if (deflated_count != 0 && log_is_enabled(Info, monitorinflation)) {
    // At Info level, only report passes that actually deflated something.
    ls = &lsh_info;
  }
  if (ls != NULL) {
    if (is_global) {
      ls->print_cr("async-deflating global idle monitors, %3.7f secs, %d monitors", timer.seconds(), deflated_count);
    } else {
      ls->print_cr("jt=" INTPTR_FORMAT ": async-deflating per-thread idle monitors, %3.7f secs, %d monitors", p2i(self), timer.seconds(), deflated_count);
    }
  }
}
2225
void ObjectSynchronizer::finish_deflate_idle_monitors(DeflateMonitorCounters* counters) {
  // Post-pass bookkeeping: publish accumulated stats and reset per-cycle
  // global state after a deflation pass.
  // Report the cumulative time for deflating each thread's idle
  // monitors. Note: if the work is split among more than one
  // worker thread, then the reported time will likely be more
  // than a beginning to end measurement of the phase.
  // Note: AsyncDeflateIdleMonitors only deflates per-thread idle
  // monitors at a safepoint when a special cleanup has been requested.
  log_info(safepoint, cleanup)("deflating per-thread idle monitors, %3.7f secs, monitors=%d", counters->perThreadTimes, counters->perThreadScavenged);

  bool needs_special_cleanup = is_cleanup_requested();
  if (!AsyncDeflateIdleMonitors || needs_special_cleanup) {
    // AsyncDeflateIdleMonitors does not use these counters unless
    // there is a special cleanup request.

    gMonitorFreeCount += counters->nScavenged;

    OM_PERFDATA_OP(Deflations, inc(counters->nScavenged));
  } else if (log_is_enabled(Info, monitorinflation)) {
    // Counters were not consumed this pass; just log the global monitor
    // counts (gListLock guards them).
    Thread::muxAcquire(&gListLock, "finish_deflate_idle_monitors");
    log_info(monitorinflation)("gMonitorPopulation=%d, gOmInUseCount=%d, "
                               "gMonitorFreeCount=%d", gMonitorPopulation,
                               gOmInUseCount, gMonitorFreeCount);
    Thread::muxRelease(&gListLock);
  }

  ForceMonitorScavenge = 0;    // Reset
  GVars.stwRandom = os::random();
  GVars.stwCycle++;
  if (needs_special_cleanup) {
    set_is_cleanup_requested(false);  // special clean up is done
  }
}
2265
void ObjectSynchronizer::deflate_thread_local_monitors(Thread* thread, DeflateMonitorCounters* counters) {
  // Deflate 'thread's idle monitors at a safepoint and fold the results
  // into 'counters'. With AsyncDeflateIdleMonitors and no special cleanup
  // request, only flag the thread for later async deflation.
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");

  if (AsyncDeflateIdleMonitors) {
    if (!is_cleanup_requested()) {
      // Mark the JavaThread for idle monitor cleanup if a special
      // cleanup has NOT been requested.
      if (thread->omInUseCount > 0) {
        // This JavaThread is using monitors so mark it.
        thread->omShouldDeflateIdleMonitors = true;
      }
      return;
    }
  }

  ObjectMonitor * freeHeadp = NULL;  // Local SLL of scavenged monitors
  ObjectMonitor * freeTailp = NULL;
  elapsedTimer timer;

  if (log_is_enabled(Info, safepoint, cleanup) ||
      log_is_enabled(Info, monitorinflation)) {
    // Only pay for the timer when one of the interested tags is logging.
    timer.start();
  }

  int deflated_count = deflate_monitor_list(thread->omInUseList_addr(), &freeHeadp, &freeTailp);

  // NOTE(review): gListLock is released in code beyond this excerpt — confirm.
  Thread::muxAcquire(&gListLock, "deflate_thread_local_monitors");

  // Adjust counters
  counters->nInCirculation += thread->omInUseCount;
  thread->omInUseCount -= deflated_count;
2487 void ObjectSynchronizer::chk_free_entry(JavaThread * jt, ObjectMonitor * n,
2488 outputStream * out, int *error_cnt_p) {
2489 if ((!AsyncDeflateIdleMonitors && n->is_busy()) ||
2490 (AsyncDeflateIdleMonitors && n->is_busy_async())) {
2491 if (jt != NULL) {
2492 out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
2493 ": free per-thread monitor must not be busy.", p2i(jt),
2494 p2i(n));
2495 } else {
2496 out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": free global monitor "
2497 "must not be busy.", p2i(n));
2498 }
2499 *error_cnt_p = *error_cnt_p + 1;
2500 }
2501 if (n->header() != NULL) {
2502 if (jt != NULL) {
2503 out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
2504 ": free per-thread monitor must have NULL _header "
2505 "field: _header=" INTPTR_FORMAT, p2i(jt), p2i(n),
2506 p2i(n->header()));
2507 *error_cnt_p = *error_cnt_p + 1;
2508 } else if (!AsyncDeflateIdleMonitors) {
2509 out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": free global monitor "
2510 "must have NULL _header field: _header=" INTPTR_FORMAT,
2511 p2i(n), p2i(n->header()));
2512 *error_cnt_p = *error_cnt_p + 1;
2513 }
2514 }
2515 if (n->object() != NULL) {
2516 if (jt != NULL) {
2517 out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
2518 ": free per-thread monitor must have NULL _object "
2519 "field: _object=" INTPTR_FORMAT, p2i(jt), p2i(n),
2520 p2i(n->object()));
2521 } else {
2522 out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": free global monitor "
2523 "must have NULL _object field: _object=" INTPTR_FORMAT,
2524 p2i(n), p2i(n->object()));
2525 }
2526 *error_cnt_p = *error_cnt_p + 1;
2527 }
2528 }
2529
2530 // Check the global free list and count; log the results of the checks.
2531 void ObjectSynchronizer::chk_global_free_list_and_count(outputStream * out,
2532 int *error_cnt_p) {
2533 int chkMonitorFreeCount = 0;
|