    // Case: TLE (Transactional Lock Elision) inimical operations such as
    // nested/recursive synchronization

    if (owner == Self) {
      m->_recursions++;
      return true;
    }

    // This Java Monitor is inflated so obj's header will never be
    // displaced to this thread's BasicLock. Make the displaced header
    // non-NULL so this BasicLock is not seen as recursive nor as
    // being locked. We do this unconditionally so that this thread's
    // BasicLock cannot be mis-interpreted by any stack walkers. For
    // performance reasons, stack walkers generally first check for
    // Biased Locking in the object's header; the second check is for
    // stack-locking in the object's header; the third check is for
    // recursive stack-locking in the displaced header in the BasicLock;
    // and last are the inflated Java Monitor (ObjectMonitor) checks.
    lock->set_displaced_header(markOopDesc::unused_mark());

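    // An unowned, non-recursive inflated monitor has a NULL _owner. Try to
    // take ownership with a single CAS; on success this thread has entered
    // the monitor without ever touching the slow path.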
    if (owner == NULL &&
        Atomic::cmpxchg((void*)Self, &(m->_owner), (void*)NULL) == NULL) {
      assert(m->_recursions == 0, "invariant");
      assert(m->_owner == Self, "invariant");
      return true;
    }
  }

  // Note that we could inflate in quick_enter.
  // This is likely a useful optimization.
  // Critically, in quick_enter() we must not:
  // -- perform bias revocation, or
  // -- block indefinitely, or
  // -- reach a safepoint

  return false;  // revert to slow-path
}
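// Returning false here has no correctness cost: the caller simply falls
// through to the full ObjectSynchronizer enter path, so quick_enter() only
// needs to recognize the cases it can decide cheaply and safely.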

// -----------------------------------------------------------------------------
// Fast Monitor Enter/Exit
// This is the fast monitor enter. The interpreter and compiler use
// some assembly copies of this code, so make sure to update those copies
// whenever this code is changed.

// ...

// Visitors ...

void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure) {
  PaddedEnd<ObjectMonitor> * block = OrderAccess::load_acquire(&gBlockList);
  while (block != NULL) {
    assert(block->object() == CHAINMARKER, "must be a block header");
    for (int i = _BLOCKSIZE - 1; i > 0; i--) {
      ObjectMonitor* mid = (ObjectMonitor *)(block + i);
      oop object = (oop)mid->object();
      if (object != NULL) {
        closure->do_monitor(mid);
      }
    }
    block = (PaddedEnd<ObjectMonitor> *)block->FreeNext;
  }
}
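// A minimal sketch of a caller-side visitor (hypothetical, not part of this
// file): do_monitor() is invoked once per monitor that still has an
// associated object, so counting in-use monitors looks like:
//
//   class CountingMonitorClosure : public MonitorClosure {
//    public:
//     int _count;
//     CountingMonitorClosure() : _count(0) {}
//     void do_monitor(ObjectMonitor* mid) { _count++; }
//   };
//
//   CountingMonitorClosure cmc;
//   ObjectSynchronizer::monitors_iterate(&cmc);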

// Get the next block in the block list.
static inline PaddedEnd<ObjectMonitor>* next(PaddedEnd<ObjectMonitor>* block) {
  assert(block->object() == CHAINMARKER, "must be a block header");
  block = (PaddedEnd<ObjectMonitor>*)block->FreeNext;
  assert(block == NULL || block->object() == CHAINMARKER, "must be a block header");
  return block;
}
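// Note that for a block header, FreeNext does double duty: rather than
// chaining free monitors it links to the next monitor block, which is why
// both ends of the hop are asserted to carry CHAINMARKER.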

static bool monitors_used_above_threshold() {
  if (gMonitorPopulation == 0) {
    return false;
  }
  int monitors_used = gMonitorPopulation - gMonitorFreeCount;
  int monitor_usage = (monitors_used * 100LL) / gMonitorPopulation;
  return monitor_usage > MonitorUsedDeflationThreshold;
}
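// Example: with gMonitorPopulation == 1000 and gMonitorFreeCount == 100,
// monitors_used == 900 and monitor_usage == (900 * 100) / 1000 == 90, so
// cleanup is signaled for any MonitorUsedDeflationThreshold below 90. The
// 100LL literal widens the multiply to 64 bits so that a large monitor
// population cannot overflow the intermediate product.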

bool ObjectSynchronizer::is_cleanup_needed() {
  if (MonitorUsedDeflationThreshold > 0) {
    return monitors_used_above_threshold();
  }
  return false;
}

void ObjectSynchronizer::oops_do(OopClosure* f) {
  if (MonitorInUseLists) {
    // When using thread-local monitor lists, we only scan the
    // global used list here (for moribund threads), and
    // the thread-local monitors in Thread::oops_do().
    global_used_oops_do(f);
  } else {
    global_oops_do(f);
  }
}

void ObjectSynchronizer::global_oops_do(OopClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  PaddedEnd<ObjectMonitor> * block = OrderAccess::load_acquire(&gBlockList);
  for (; block != NULL; block = next(block)) {
    assert(block->object() == CHAINMARKER, "must be a block header");
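    // Slot 0 of each block is the list header (tagged with CHAINMARKER);
    // only slots 1 through _BLOCKSIZE - 1 hold real monitors, hence the
    // loop starts at index 1.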
    for (int i = 1; i < _BLOCKSIZE; i++) {
      ObjectMonitor* mid = (ObjectMonitor *)&block[i];
      if (mid->object() != NULL) {
        f->do_oop((oop*)mid->object_addr());
      }
    }
  }
}

void ObjectSynchronizer::global_used_oops_do(OopClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  list_oops_do(gOmInUseList, f);
}

void ObjectSynchronizer::thread_local_used_oops_do(Thread* thread, OopClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  list_oops_do(thread->omInUseList, f);
}

// ...

  // Prevent omFlush from changing mids in Thread dtors during deflation,
  // and in case the VM thread is acquiring a lock during a safepoint.
  // See e.g. 6320749.
  Thread::muxAcquire(&gListLock, "scavenge - return");
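  // gListLock guards the global monitor lists (gFreeList and gOmInUseList)
  // against concurrent mutation, e.g. by exiting threads flushing their
  // thread-local monitors via omFlush().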

  if (MonitorInUseLists) {
    // Note: the thread-local monitor lists get deflated in
    // a separate pass. See deflate_thread_local_monitors().

    // For moribund threads, scan gOmInUseList
    if (gOmInUseList) {
      counters->nInCirculation += gOmInUseCount;
      int deflated_count = deflate_monitor_list((ObjectMonitor **)&gOmInUseList, &freeHeadp, &freeTailp);
      gOmInUseCount -= deflated_count;
      counters->nScavenged += deflated_count;
      counters->nInuse += gOmInUseCount;
    }

  } else {
    PaddedEnd<ObjectMonitor> * block = OrderAccess::load_acquire(&gBlockList);
    for (; block != NULL; block = next(block)) {
      // Iterate over all extant monitors - scavenge all idle monitors.
      assert(block->object() == CHAINMARKER, "must be a block header");
      counters->nInCirculation += _BLOCKSIZE;
      for (int i = 1; i < _BLOCKSIZE; i++) {
        ObjectMonitor* mid = (ObjectMonitor*)&block[i];
        oop obj = (oop)mid->object();

        if (obj == NULL) {
          // The monitor is not associated with an object.
          // The monitor should be on either a thread-specific private
          // free list or the global free list.
          // obj == NULL IMPLIES mid->is_busy() == 0
          guarantee(!mid->is_busy(), "invariant");
          continue;
        }
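        // deflate_monitor() succeeds only for an idle (non-busy) monitor;
        // on success it disassociates mid from obj and appends it to the
        // caller's scavenge list via freeHeadp/freeTailp.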
        deflated = deflate_monitor(mid, obj, &freeHeadp, &freeTailp);

        if (deflated) {
          mid->FreeNext = NULL;
          counters->nScavenged++;

// ...

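    // Verify that hcSequence has at least a cache line of padding between
    // it and the end of SharedGlobals; otherwise a global placed
    // immediately after SharedGlobals could false-share its cache line.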
    if ((sizeof(SharedGlobals) - offset_hcSequence) < cache_line_size) {
      tty->print_cr("WARNING: the SharedGlobals.hcSequence field is closer "
                    "to the struct end than a cache line which permits false "
                    "sharing.");
      (*warning_cnt_ptr)++;
    }
  }
}

#ifndef PRODUCT

// Check if monitor belongs to the monitor cache.
// The list is grow-only so it's *relatively* safe to traverse
// the list of extant blocks without taking a lock.

int ObjectSynchronizer::verify_objmon_isinpool(ObjectMonitor *monitor) {
  PaddedEnd<ObjectMonitor> * block = OrderAccess::load_acquire(&gBlockList);
  while (block != NULL) {
    assert(block->object() == CHAINMARKER, "must be a block header");
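    // A monitor allocated from this block lies strictly between the header
    // slot and the one-past-the-end address. The alignment assert below
    // additionally checks that the pointer refers to the start of a slot
    // rather than into a slot's interior.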
    if (monitor > &block[0] && monitor < &block[_BLOCKSIZE]) {
      address mon = (address)monitor;
      address blk = (address)block;
      size_t diff = mon - blk;
      assert((diff % sizeof(PaddedEnd<ObjectMonitor>)) == 0, "must be aligned");
      return 1;
    }
    block = (PaddedEnd<ObjectMonitor> *)block->FreeNext;
  }
  return 0;
}

#endif