    // Case: light contention possibly amenable to TLE
    // Case: TLE inimical operations such as nested/recursive synchronization

    if (owner == self) {
      m->_recursions++;
      return true;
    }

    // This Java Monitor is inflated, so obj's header will never be
    // displaced to this thread's BasicLock. Make the displaced header
    // non-NULL so this BasicLock is not seen as recursive nor as
    // being locked. We do this unconditionally so that this thread's
    // BasicLock cannot be mis-interpreted by any stack walkers. For
    // performance reasons, stack walkers generally first check for
    // Biased Locking in the object's header; the second check is for
    // stack-locking in the object's header; the third check is for
    // recursive stack-locking in the displaced header in the BasicLock;
    // and last come the inflated Java Monitor (ObjectMonitor) checks.
    // (A standalone sketch of this check order follows quick_enter() below.)
    lock->set_displaced_header(markWord::unused_mark());

    if (owner == NULL && Atomic::replace_if_null(&(m->_owner), self)) {
      assert(m->_recursions == 0, "invariant");
      return true;
    }
  }
  // Note that we could inflate in quick_enter.
  // This is likely a useful optimization.
  // Critically, in quick_enter() we must not:
  // -- perform bias revocation, or
  // -- block indefinitely, or
  // -- reach a safepoint

  return false;        // revert to slow-path
}
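// A minimal standalone sketch (not HotSpot code) of the check order the
// comment in quick_enter() above describes: stack walkers test for biased
// locking first, then stack-locking, then a recursive displaced header in
// the BasicLock, and the inflated ObjectMonitor last. The bit encodings and
// names (SketchMark, SketchBasicLock, classify_lock_state) are illustrative
// assumptions, not the real mark word contract.
#include <cstdint>

enum class SketchLockState { Biased, StackLocked, RecursiveStackLocked, Inflated, Unlocked };

struct SketchMark {
  std::uintptr_t bits;
  bool is_biased() const       { return (bits & 0x7) == 0x5; } // assumed encoding
  bool is_stack_locked() const { return (bits & 0x3) == 0x0; } // assumed encoding
  bool has_monitor() const     { return (bits & 0x3) == 0x2; } // assumed encoding
};

struct SketchBasicLock {
  std::uintptr_t displaced_header; // 0 is taken to mean "recursive" in this sketch
};

static SketchLockState classify_lock_state(SketchMark header, const SketchBasicLock* lock) {
  if (header.is_biased()) return SketchLockState::Biased;            // check 1
  if (header.is_stack_locked()) {                                    // check 2
    if (lock != nullptr && lock->displaced_header == 0) {
      return SketchLockState::RecursiveStackLocked;                  // check 3
    }
    return SketchLockState::StackLocked;
  }
  if (header.has_monitor()) return SketchLockState::Inflated;        // check 4
  return SketchLockState::Unlocked;
}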

// -----------------------------------------------------------------------------
// Monitor Enter/Exit
// The interpreter and compiler assembly code tries to lock using the fast path
// of this algorithm. Make sure to update that code if the following function is
// changed. The implementation is extremely sensitive to race conditions. Be careful.
// ...

    // WARNING:
    //   The displaced header in the BasicLock on a thread's stack
    //   is strictly immutable. It CANNOT be changed in ANY case.
    //   So we have to inflate the stack lock into an ObjectMonitor
    //   even if the current thread owns the lock. The BasicLock on
    //   a thread's stack can be asynchronously read by other threads
    //   during an inflate() call, so any change to that stack memory
    //   may not propagate to other threads correctly.
  }

  // Inflate the monitor to set the hash code.
  monitor = inflate(self, obj, inflate_cause_hash_code);
  // Load the displaced header and check it for a hash code.
  mark = monitor->header();
  assert(mark.is_neutral(), "invariant: header=" INTPTR_FORMAT, mark.value());
  hash = mark.hash();
  if (hash == 0) {
    hash = get_next_hash(self, obj);
    temp = mark.copy_set_hash(hash); // merge the hash code into the header
    assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
    uintptr_t v = Atomic::cmpxchg((volatile uintptr_t*)monitor->header_addr(), mark.value(), temp.value());
    test = markWord(v);
    if (test != mark) {
      // The only non-deflation update to the ObjectMonitor's
      // header/dmw field is to merge in the hash code. If someone
      // adds a new usage of the header/dmw field, please update
      // this code.
      hash = test.hash();
      assert(test.is_neutral(), "invariant: header=" INTPTR_FORMAT, test.value());
      assert(hash != 0, "Trivial unexpected object/monitor header usage.");
    }
  }
  // We finally have the hash.
  return hash;
}
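// A minimal standalone sketch (not HotSpot code) of the lossless CAS-merge
// pattern FastHashCode() uses above: attempt to publish a hash into a shared
// header word with one compare-and-swap, and on failure adopt the hash that
// the winning thread installed. The layout assumption (hash in the low 32
// bits, 0 meaning "no hash yet") and the name merge_hash are hypothetical.
#include <atomic>
#include <cstdint>

static std::uint32_t merge_hash(std::atomic<std::uint64_t>& header, std::uint32_t candidate) {
  std::uint64_t expected = header.load();
  for (;;) {
    std::uint32_t existing = static_cast<std::uint32_t>(expected);
    if (existing != 0) {
      return existing;                       // another thread merged a hash first
    }
    std::uint64_t desired = (expected & ~std::uint64_t(0xffffffffu)) | candidate;
    if (header.compare_exchange_weak(expected, desired)) {
      return candidate;                      // our hash is now published
    }
    // CAS failed: 'expected' was refreshed with the current word; re-check it.
  }
}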

// Deprecated -- use FastHashCode() instead.

intptr_t ObjectSynchronizer::identity_hash_value_for(Handle obj) {
  return FastHashCode(Thread::current(), obj());
}
// ...

// type of limit. Beware that if MonitorBound is set to too low a value
// we could just loop. In addition, if MonitorBound is set to a low value
// we'll incur more safepoints, which are harmful to performance.
// See also: GuaranteedSafepointInterval
//
// The current implementation uses asynchronous VM operations.
//
// If MonitorBound is set, the boundary applies to
//   (g_om_population - g_om_free_count)
// i.e., if there are not enough ObjectMonitors on the global free list,
// then a safepoint deflation is induced. Picking a good MonitorBound value
// is non-trivial.

static void InduceScavenge(Thread* self, const char* Whence) {
  // Induce a STW safepoint to trim monitors.
  // Ultimately, this results in a call to deflate_idle_monitors() in the near future.
  // More precisely, trigger an asynchronous STW safepoint as the number
  // of active monitors passes the specified threshold.
  // TODO: assert thread state is reasonable

  if (ForceMonitorScavenge == 0 && Atomic::xchg(&ForceMonitorScavenge, 1) == 0) {
    // Induce a 'null' safepoint to scavenge monitors.
    // The VM_Operation instance must be heap allocated: the op will be
    // enqueued and posted to the VMThread, so it has a lifespan longer
    // than that of this activation record. The VMThread will delete the
    // op when completed.
    VMThread::execute(new VM_ScavengeMonitors());
  }
}
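// A minimal standalone sketch (not HotSpot code) of the trigger logic above:
// the racy pre-check plus atomic exchange guarantee that, of all the threads
// that notice the threshold has been crossed, exactly one enqueues the
// scavenge request per cycle. All names (g_sketch_*, kSketchBound,
// sketch_request_scavenge) are hypothetical.
#include <atomic>

static std::atomic<int>  g_sketch_scavenge_requested{0};
static std::atomic<long> g_sketch_population{0};  // stands in for g_om_population
static std::atomic<long> g_sketch_free_count{0};  // stands in for g_om_free_count
static const long kSketchBound = 4096;            // stands in for MonitorBound

static void sketch_request_scavenge() {
  // Cheap racy pre-check first; the atomic exchange is the real gate.
  if (g_sketch_scavenge_requested.load(std::memory_order_relaxed) == 0 &&
      g_sketch_scavenge_requested.exchange(1) == 0) {
    // Exactly one thread per cycle reaches this point; it would enqueue
    // the asynchronous cleanup operation here.
  }
}

static void sketch_maybe_request_scavenge() {
  // Mirrors the MonitorBound rule: trigger once the number of in-use
  // monitors (population - free count) passes the bound.
  if (g_sketch_population.load() - g_sketch_free_count.load() > kSketchBound) {
    sketch_request_scavenge();
  }
}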

ObjectMonitor* ObjectSynchronizer::om_alloc(Thread* self) {
  // A large MAXPRIVATE value reduces both list lock contention
  // and list coherency traffic, but also tends to increase the
  // number of ObjectMonitors in circulation as well as the STW
  // scavenge costs. As usual, we lean toward time in space-time
  // tradeoffs.
  const int MAXPRIVATE = 1024;
  stringStream ss;
  for (;;) {
    ObjectMonitor* m;

    // 1: try to allocate from the thread's local om_free_list.
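// ...

// A minimal standalone sketch (not HotSpot code) of the allocation strategy
// the comments in om_alloc() above describe: pop from a per-thread free list
// without synchronization, refill a bounded private batch from a
// lock-protected global list when the local list is empty, and only then
// allocate fresh storage. All names (SketchMonitor, kSketchBatch,
// sketch_alloc) are hypothetical.
#include <mutex>

struct SketchMonitor { SketchMonitor* next = nullptr; };

static std::mutex g_sketch_list_lock;
static SketchMonitor* g_sketch_free_list = nullptr;        // global list, lock-protected
thread_local SketchMonitor* t_sketch_free_list = nullptr;  // per-thread list, no locking

static SketchMonitor* sketch_alloc() {
  for (;;) {
    // 1: fast path -- pop from the thread-local list, no synchronization.
    if (t_sketch_free_list != nullptr) {
      SketchMonitor* m = t_sketch_free_list;
      t_sketch_free_list = m->next;
      return m;
    }
    // 2: slow path -- move a bounded private batch from the global list,
    //    in the spirit of the MAXPRIVATE cap above.
    const int kSketchBatch = 32;
    bool refilled = false;
    {
      std::lock_guard<std::mutex> guard(g_sketch_list_lock);
      for (int i = 0; i < kSketchBatch && g_sketch_free_list != nullptr; i++) {
        SketchMonitor* m = g_sketch_free_list;
        g_sketch_free_list = m->next;
        m->next = t_sketch_free_list;
        t_sketch_free_list = m;
        refilled = true;
      }
    }
    if (!refilled) {
      // 3: the global list was empty too -- allocate fresh storage.
      return new SketchMonitor();
    }
    // Loop back and retry the fast path with the refilled local list.
  }
}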