223 // Case: light contention possibly amenable to TLE
224 // Case: TLE inimical operations such as nested/recursive synchronization
225
226 if (owner == self) {
227 m->_recursions++;
228 return true;
229 }
230
231 // This Java Monitor is inflated so obj's header will never be
232 // displaced to this thread's BasicLock. Make the displaced header
233 // non-NULL so this BasicLock is not seen as recursive nor as
234 // being locked. We do this unconditionally so that this thread's
235 // BasicLock cannot be mis-interpreted by any stack walkers. For
236 // performance reasons, stack walkers generally first check for
237 // Biased Locking in the object's header, the second check is for
238 // stack-locking in the object's header, the third check is for
239 // recursive stack-locking in the displaced header in the BasicLock,
240 // and last are the inflated Java Monitor (ObjectMonitor) checks.
241 lock->set_displaced_header(markWord::unused_mark());
242
243 if (owner == NULL && Atomic::replace_if_null(self, &(m->_owner))) {
244 assert(m->_recursions == 0, "invariant");
245 return true;
246 }
247 }
248
249 // Note that we could inflate in quick_enter.
250 // This is likely a useful optimization
251 // Critically, in quick_enter() we must not:
252 // -- perform bias revocation, or
253 // -- block indefinitely, or
254 // -- reach a safepoint
255
256 return false; // revert to slow-path
257 }
258
259 // -----------------------------------------------------------------------------
260 // Monitor Enter/Exit
261 // The interpreter and compiler assembly code tries to lock using the fast path
262 // of this algorithm. Make sure to update that code if the following function is
263 // changed. The implementation is extremely sensitive to race conditions. Be careful.
728 // WARNING:
729 // The displaced header in the BasicLock on a thread's stack
730 // is strictly immutable. It CANNOT be changed in ANY cases.
731 // So we have to inflate the stack lock into an ObjectMonitor
732 // even if the current thread owns the lock. The BasicLock on
733 // a thread's stack can be asynchronously read by other threads
734 // during an inflate() call so any change to that stack memory
735 // may not propagate to other threads correctly.
736 }
737
738 // Inflate the monitor to set hash code
739 monitor = inflate(self, obj, inflate_cause_hash_code);
740 // Load displaced header and check it has hash code
741 mark = monitor->header();
742 assert(mark.is_neutral(), "invariant: header=" INTPTR_FORMAT, mark.value());
743 hash = mark.hash();
744 if (hash == 0) {
745 hash = get_next_hash(self, obj);
746 temp = mark.copy_set_hash(hash); // merge hash code into header
747 assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
748 uintptr_t v = Atomic::cmpxchg(temp.value(), (volatile uintptr_t*)monitor->header_addr(), mark.value());
749 test = markWord(v);
750 if (test != mark) {
751 // The only non-deflation update to the ObjectMonitor's
752 // header/dmw field is to merge in the hash code. If someone
753 // adds a new usage of the header/dmw field, please update
754 // this code.
755 hash = test.hash();
756 assert(test.is_neutral(), "invariant: header=" INTPTR_FORMAT, test.value());
757 assert(hash != 0, "Trivial unexpected object/monitor header usage.");
758 }
759 }
760 // We finally get the hash
761 return hash;
762 }
763
764 // Deprecated -- use FastHashCode() instead.
765
766 intptr_t ObjectSynchronizer::identity_hash_value_for(Handle obj) {
767 return FastHashCode(Thread::current(), obj());
768 }
|
223 // Case: light contention possibly amenable to TLE
224 // Case: TLE inimical operations such as nested/recursive synchronization
225
226 if (owner == self) {
227 m->_recursions++;
228 return true;
229 }
230
231 // This Java Monitor is inflated so obj's header will never be
232 // displaced to this thread's BasicLock. Make the displaced header
233 // non-NULL so this BasicLock is not seen as recursive nor as
234 // being locked. We do this unconditionally so that this thread's
235 // BasicLock cannot be mis-interpreted by any stack walkers. For
236 // performance reasons, stack walkers generally first check for
237 // Biased Locking in the object's header, the second check is for
238 // stack-locking in the object's header, the third check is for
239 // recursive stack-locking in the displaced header in the BasicLock,
240 // and last are the inflated Java Monitor (ObjectMonitor) checks.
241 lock->set_displaced_header(markWord::unused_mark());
242
243 if (owner == NULL && Atomic::replace_if_null(&(m->_owner), self)) {
244 assert(m->_recursions == 0, "invariant");
245 return true;
246 }
247 }
248
249 // Note that we could inflate in quick_enter.
250 // This is likely a useful optimization
251 // Critically, in quick_enter() we must not:
252 // -- perform bias revocation, or
253 // -- block indefinitely, or
254 // -- reach a safepoint
255
256 return false; // revert to slow-path
257 }
258
259 // -----------------------------------------------------------------------------
260 // Monitor Enter/Exit
261 // The interpreter and compiler assembly code tries to lock using the fast path
262 // of this algorithm. Make sure to update that code if the following function is
263 // changed. The implementation is extremely sensitive to race conditions. Be careful.
728 // WARNING:
729 // The displaced header in the BasicLock on a thread's stack
730 // is strictly immutable. It CANNOT be changed in ANY cases.
731 // So we have to inflate the stack lock into an ObjectMonitor
732 // even if the current thread owns the lock. The BasicLock on
733 // a thread's stack can be asynchronously read by other threads
734 // during an inflate() call so any change to that stack memory
735 // may not propagate to other threads correctly.
736 }
737
738 // Inflate the monitor to set hash code
739 monitor = inflate(self, obj, inflate_cause_hash_code);
740 // Load displaced header and check it has hash code
741 mark = monitor->header();
742 assert(mark.is_neutral(), "invariant: header=" INTPTR_FORMAT, mark.value());
743 hash = mark.hash();
744 if (hash == 0) {
745 hash = get_next_hash(self, obj);
746 temp = mark.copy_set_hash(hash); // merge hash code into header
747 assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
748 uintptr_t v = Atomic::cmpxchg((volatile uintptr_t*)monitor->header_addr(), mark.value(), temp.value());
749 test = markWord(v);
750 if (test != mark) {
751 // The only non-deflation update to the ObjectMonitor's
752 // header/dmw field is to merge in the hash code. If someone
753 // adds a new usage of the header/dmw field, please update
754 // this code.
755 hash = test.hash();
756 assert(test.is_neutral(), "invariant: header=" INTPTR_FORMAT, test.value());
757 assert(hash != 0, "Trivial unexpected object/monitor header usage.");
758 }
759 }
760 // We finally get the hash
761 return hash;
762 }
763
764 // Deprecated -- use FastHashCode() instead.
765
766 intptr_t ObjectSynchronizer::identity_hash_value_for(Handle obj) {
767 return FastHashCode(Thread::current(), obj());
768 }
|