  return 0;
}

#define NINFLATIONLOCKS 256
static volatile intptr_t gInflationLocks[NINFLATIONLOCKS];

// global list of blocks of monitors
PaddedEnd<ObjectMonitor> * volatile ObjectSynchronizer::gBlockList = NULL;
// global monitor free list
ObjectMonitor * volatile ObjectSynchronizer::gFreeList = NULL;
// global monitor in-use list, for moribund threads,
// monitors they inflated need to be scanned for deflation
ObjectMonitor * volatile ObjectSynchronizer::gOmInUseList = NULL;
// count of entries in gOmInUseList
int ObjectSynchronizer::gOmInUseCount = 0;

static volatile intptr_t gListLock = 0;      // protects global monitor lists
static volatile int gMonitorFreeCount = 0;   // # on gFreeList
static volatile int gMonitorPopulation = 0;  // # Extant -- in circulation

#define CHECK_THROW_NOSYNC_IMSE(obj)  \
  if ((obj)->mark()->is_always_locked()) {  \
    ResourceMark rm(THREAD);                \
    THROW_MSG(vmSymbols::java_lang_IllegalMonitorStateException(), obj->klass()->external_name()); \
  }

#define CHECK_THROW_NOSYNC_IMSE_0(obj)  \
  if ((obj)->mark()->is_always_locked()) {  \
    ResourceMark rm(THREAD);                \
    THROW_MSG_0(vmSymbols::java_lang_IllegalMonitorStateException(), obj->klass()->external_name()); \
  }
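
// Usage sketch (an observation, not new behavior): these guards sit at the top
// of the monitor operations below so that a Valhalla value type -- whose mark
// word is permanently "always locked" -- fails fast with an
// IllegalMonitorStateException instead of reaching the locking machinery, e.g.:
//
//   void ObjectSynchronizer::jni_enter(Handle obj, TRAPS) {
//     CHECK_THROW_NOSYNC_IMSE(obj);  // throws and returns for value types
//     ...
//   }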


#define CHAINMARKER (cast_to_oop<intptr_t>(-1))


// =====================> Quick functions

// The quick_* forms are special fast-path variants used to improve
// performance. In the simplest case, a "quick_*" implementation could
// simply return false, in which case the caller will perform the necessary
// state transitions and call the slow-path form.
// The fast-path is designed to handle frequently arising cases in an efficient
// manner and is just a degenerate "optimistic" variant of the slow-path.
// returns true  -- to indicate the call was satisfied.
// returns false -- to indicate the call needs the services of the slow-path.
// A no-loitering ordinance is in effect for code in the quick_* family
// operators: safepoints or indefinite blocking (blocking that might span a
// safepoint) are forbidden. Generally the thread_state() is _in_Java upon
// entry.
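//
// Caller-side shape implied by the contract above (illustrative only; the
// actual call sites in the runtime entry points are an assumption here):
//
//   if (!ObjectSynchronizer::quick_notify(obj, thread, /*all=*/false)) {
//     // perform the required state transition, then take the slow path:
//     ObjectSynchronizer::notify(Handle(thread, obj), CHECK);
//   }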
//
// Consider: An interesting optimization is to have the JIT recognize the
// following common idiom:
//   synchronized (someobj) { .... ; notify(); }
// That is, we find a notify() or notifyAll() call that immediately precedes
// the monitorexit operation. In that case the JIT could fuse the operations
// into a single notifyAndExit() runtime primitive.

bool ObjectSynchronizer::quick_notify(oopDesc * obj, Thread * self, bool all) {
  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(self->is_Java_thread(), "invariant");
  assert(((JavaThread *) self)->thread_state() == _thread_in_Java, "invariant");
  NoSafepointVerifier nsv;
  if (obj == NULL) return false;  // slow-path for invalid obj
  assert(!EnableValhalla || !obj->klass()->is_value(), "monitor op on value type");
  const markOop mark = obj->mark();

  if (mark->has_locker() && self->is_lock_owned((address)mark->locker())) {
    // Degenerate notify
    // stack-locked by caller so by definition the implied waitset is empty.
    return true;
  }

  if (mark->has_monitor()) {
    ObjectMonitor * const mon = mark->monitor();
    assert(oopDesc::equals((oop) mon->object(), obj), "invariant");
    if (mon->owner() != self) return false;  // slow-path for IMS exception

    if (mon->first_waiter() != NULL) {
      // We have one or more waiters. Since this is an inflated monitor
      // that we own, we can transfer one or more threads from the waitset
      // to the entrylist here and now, avoiding the slow-path.
      if (all) {
        DTRACE_MONITOR_PROBE(notifyAll, mon, obj, self);
      } else {
        // ... (code elided in this excerpt) ...
  }

  // biased locking and any other IMS exception states take the slow-path
  return false;
}


// The LockNode emitted directly at the synchronization site would have
// been too big if it were to have included support for the cases of inflated
// recursive enter and exit, so they go here instead.
// Note that we can't safely call AsyncPrintJavaStack() from within
// quick_enter() as our thread state remains _in_Java.

bool ObjectSynchronizer::quick_enter(oop obj, Thread * Self,
                                     BasicLock * lock) {
  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(Self->is_Java_thread(), "invariant");
  assert(((JavaThread *) Self)->thread_state() == _thread_in_Java, "invariant");
  NoSafepointVerifier nsv;
  if (obj == NULL) return false;  // Need to throw NPE
  assert(!EnableValhalla || !obj->klass()->is_value(), "monitor op on value type");
  const markOop mark = obj->mark();

  if (mark->has_monitor()) {
    ObjectMonitor * const m = mark->monitor();
    assert(oopDesc::equals((oop) m->object(), obj), "invariant");
    Thread * const owner = (Thread *) m->_owner;

    // Lock contention and Transactional Lock Elision (TLE) diagnostics
    // and observability
    // Case: light contention possibly amenable to TLE
    // Case: TLE inimical operations such as nested/recursive synchronization

    if (owner == Self) {
      m->_recursions++;
      return true;
    }

    // This Java Monitor is inflated so obj's header will never be
    // displaced to this thread's BasicLock. Make the displaced header
    // non-NULL so this BasicLock is not seen as recursive nor as
    // ... (code elided in this excerpt) ...

  // Note that we could inflate in quick_enter.
  // This is likely a useful optimization.
  // Critically, in quick_enter() we must not:
  // -- perform bias revocation, or
  // -- block indefinitely, or
  // -- reach a safepoint

  return false;  // revert to slow-path
}

// -----------------------------------------------------------------------------
// Fast Monitor Enter/Exit
// This is the fast monitor enter. The interpreter and compiler use
// some assembly copies of this code. Make sure to update those copies
// if the following function is changed. The implementation is
// extremely sensitive to race conditions. Be careful.

void ObjectSynchronizer::fast_enter(Handle obj, BasicLock* lock,
                                    bool attempt_rebias, TRAPS) {
  CHECK_THROW_NOSYNC_IMSE(obj);
  if (UseBiasedLocking) {
    if (!SafepointSynchronize::is_at_safepoint()) {
      BiasedLocking::Condition cond = BiasedLocking::revoke_and_rebias(obj, attempt_rebias, THREAD);
      if (cond == BiasedLocking::BIAS_REVOKED_AND_REBIASED) {
        return;
      }
    } else {
      assert(!attempt_rebias, "can not rebias toward VM thread");
      BiasedLocking::revoke_at_safepoint(obj);
    }
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  slow_enter(obj, lock, THREAD);
}

void ObjectSynchronizer::fast_exit(oop object, BasicLock* lock, TRAPS) {
  markOop mark = object->mark();
  if (EnableValhalla && mark->is_always_locked()) {
    return;
  }
  assert(!EnableValhalla || !object->klass()->is_value(), "monitor op on value type");
  // We cannot check for Biased Locking if we are racing an inflation.
  assert(mark == markOopDesc::INFLATING() ||
         !mark->has_bias_pattern(), "should not see bias pattern here");

  markOop dhw = lock->displaced_header();
  if (dhw == NULL) {
    // If the displaced header is NULL, then this exit matches up with
    // a recursive enter. No real work to do here except for diagnostics.
#ifndef PRODUCT
    if (mark != markOopDesc::INFLATING()) {
      // Only do diagnostics if we are not racing an inflation. Simply
      // exiting a recursive enter of a Java Monitor that is being
      // inflated is safe; see the has_monitor() comment below.
      assert(!mark->is_neutral(), "invariant");
      assert(!mark->has_locker() ||
             THREAD->is_lock_owned((address)mark->locker()), "invariant");
      if (mark->has_monitor()) {
        // The BasicLock's displaced_header is marked as a recursive
        // enter and we have an inflated Java Monitor (ObjectMonitor).
        // This is a special case where the Java Monitor was inflated
        // ... (code elided in this excerpt) ...

  if (mark == (markOop) lock) {
    // If the object is stack-locked by the current thread, try to
    // swing the displaced header from the BasicLock back to the mark.
    assert(dhw->is_neutral(), "invariant");
    if (object->cas_set_mark(dhw, mark) == mark) {
      return;
    }
  }

  // We have to take the slow-path of possible inflation and then exit.
  inflate(THREAD, object, inflate_cause_vm_internal)->exit(true, THREAD);
}

// -----------------------------------------------------------------------------
// Interpreter/Compiler Slow Case
// This routine is used to handle the interpreter/compiler slow case.
// We don't need to use the fast path here, because it must have
// failed in the interpreter/compiler code.
void ObjectSynchronizer::slow_enter(Handle obj, BasicLock* lock, TRAPS) {
  CHECK_THROW_NOSYNC_IMSE(obj);
  markOop mark = obj->mark();
  assert(!mark->has_bias_pattern(), "should not see bias pattern here");

  if (mark->is_neutral()) {
    // Anticipate successful CAS -- the ST of the displaced mark must
    // be visible <= the ST performed by the CAS.
    lock->set_displaced_header(mark);
    if (mark == obj()->cas_set_mark((markOop) lock, mark)) {
      return;
    }
    // Fall through to inflate() ...
  } else if (mark->has_locker() &&
             THREAD->is_lock_owned((address)mark->locker())) {
    assert(lock != mark->locker(), "must not re-lock the same lock");
    assert(lock != (BasicLock*)obj->mark(), "don't relock with same BasicLock");
    lock->set_displaced_header(NULL);
    return;
  }

  // The object header will never be displaced to this lock,
  // ... (code elided in this excerpt) ...
// We don't need to use the fast path here, because it must have
// failed in the interpreter/compiler code. Simply using the heavyweight
// monitor should be OK, unless someone finds otherwise.
void ObjectSynchronizer::slow_exit(oop object, BasicLock* lock, TRAPS) {
  fast_exit(object, lock, THREAD);
}

// -----------------------------------------------------------------------------
// Class Loader support to workaround deadlocks on the class loader lock objects
// Also used by GC
// complete_exit()/reenter() are used to wait on a nested lock
// i.e. to give up an outer lock completely and then re-enter
// Used when holding nested locks - lock acquisition order: lock1 then lock2
//  1) complete_exit lock1 - saving recursion count
//  2) wait on lock2
//  3) when notified on lock2, unlock lock2
//  4) reenter lock1 with original recursion count
//  5) lock lock2
// NOTE: must use heavy weight monitor to handle complete_exit/reenter()
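//
// A hedged Java-level sketch of steps 1-5 above (lock1/lock2 are illustrative
// names only):
//
//   intptr_t rec = complete_exit(lock1);      // (1) fully release lock1
//   synchronized (lock2) { lock2.wait(); }    // (2)+(3) wait, then drop lock2
//   reenter(lock1, rec);                      // (4) restore recursion count
//   synchronized (lock2) { /* ... */ }        // (5) reacquire lock2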
intptr_t ObjectSynchronizer::complete_exit(Handle obj, TRAPS) {
  assert(!EnableValhalla || !obj->klass()->is_value(), "monitor op on value type");
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  ObjectMonitor* monitor = inflate(THREAD, obj(), inflate_cause_vm_internal);

  return monitor->complete_exit(THREAD);
}

// NOTE: must use heavy weight monitor to handle complete_exit/reenter()
void ObjectSynchronizer::reenter(Handle obj, intptr_t recursion, TRAPS) {
  assert(!EnableValhalla || !obj->klass()->is_value(), "monitor op on value type");
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  ObjectMonitor* monitor = inflate(THREAD, obj(), inflate_cause_vm_internal);

  monitor->reenter(recursion, THREAD);
}
// -----------------------------------------------------------------------------
// JNI locks on java objects
// NOTE: must use heavy weight monitor to handle jni monitor enter
void ObjectSynchronizer::jni_enter(Handle obj, TRAPS) {
  // the current locking is from JNI instead of Java code
  CHECK_THROW_NOSYNC_IMSE(obj);
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }
  THREAD->set_current_pending_monitor_is_from_java(false);
  inflate(THREAD, obj(), inflate_cause_jni_enter)->enter(THREAD);
  THREAD->set_current_pending_monitor_is_from_java(true);
}

// NOTE: must use heavy weight monitor to handle jni monitor exit
void ObjectSynchronizer::jni_exit(oop obj, Thread* THREAD) {
  CHECK_THROW_NOSYNC_IMSE(obj);
  if (UseBiasedLocking) {
    Handle h_obj(THREAD, obj);
    BiasedLocking::revoke_and_rebias(h_obj, false, THREAD);
    obj = h_obj();
  }
  assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");

  ObjectMonitor* monitor = inflate(THREAD, obj, inflate_cause_jni_exit);
  // If this thread has locked the object, exit the monitor. Note: can't use
  // monitor->check(CHECK); must exit even if an exception is pending.
  if (monitor->check(THREAD)) {
    monitor->exit(true, THREAD);
  }
}

// -----------------------------------------------------------------------------
// Internal VM locks on java objects
// standard constructor, allows locking failures
ObjectLocker::ObjectLocker(Handle obj, Thread* thread, bool doLock) {
  _dolock = doLock;
  _thread = thread;
  debug_only(if (StrictSafepointChecks) _thread->check_for_valid_safepoint_state(false);)
  _obj = obj;

  if (_dolock) {
    ObjectSynchronizer::fast_enter(_obj, &_lock, false, _thread);
  }
}

ObjectLocker::~ObjectLocker() {
  if (_dolock) {
    ObjectSynchronizer::fast_exit(_obj(), &_lock, _thread);
  }
}
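
// Illustrative RAII usage (hypothetical call site; obj_handle is assumed to
// be a live Handle on the current thread):
//
//   {
//     ObjectLocker ol(obj_handle, THREAD, true);  // fast_enter in constructor
//     // ... VM-internal work guarded by obj_handle's monitor ...
//   }                                             // fast_exit in destructor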


// -----------------------------------------------------------------------------
// Wait/Notify/NotifyAll
// NOTE: must use heavy weight monitor to handle wait()
int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
  CHECK_THROW_NOSYNC_IMSE_0(obj);
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }
  if (millis < 0) {
    THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
  }
  ObjectMonitor* monitor = inflate(THREAD, obj(), inflate_cause_wait);

  DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), THREAD, millis);
  monitor->wait(millis, true, THREAD);

  // This dummy call is in place to get around dtrace bug 6254741. Once
  // that's fixed we can uncomment the following line, remove the call
  // and change this function back into a "void" func.
  // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
  return dtrace_waited_probe(monitor, obj, THREAD);
}

void ObjectSynchronizer::waitUninterruptibly(Handle obj, jlong millis, TRAPS) {
  CHECK_THROW_NOSYNC_IMSE(obj);
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }
  if (millis < 0) {
    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
  }
  inflate(THREAD, obj(), inflate_cause_wait)->wait(millis, false, THREAD);
}

void ObjectSynchronizer::notify(Handle obj, TRAPS) {
  CHECK_THROW_NOSYNC_IMSE(obj);
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  markOop mark = obj->mark();
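  // Degenerate notify: if the object is stack-locked by the calling thread,
  // the implied wait set is empty, so there is nothing to notify
  // (see quick_notify() above).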
  if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
    return;
  }
  inflate(THREAD, obj(), inflate_cause_notify)->notify(THREAD);
}

// NOTE: see the comment in notify()
void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
  CHECK_THROW_NOSYNC_IMSE(obj);
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  markOop mark = obj->mark();
  if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
    return;
  }
  inflate(THREAD, obj(), inflate_cause_notify)->notifyAll(THREAD);
}

// -----------------------------------------------------------------------------
// Hash Code handling
//
// Performance concern:
// OrderAccess::storestore() calls release() which at one time stored 0
// into the global volatile OrderAccess::dummy variable. This store was
// unnecessary for correctness. Many threads storing into a common location
// causes considerable cache migration or "sloshing" on large SMP systems.
// ... (code elided in this excerpt) ...
    // This is probably the best overall implementation -- we'll
    // likely make this the default in future releases.
    unsigned t = Self->_hashStateX;
    t ^= (t << 11);
    Self->_hashStateX = Self->_hashStateY;
    Self->_hashStateY = Self->_hashStateZ;
    Self->_hashStateZ = Self->_hashStateW;
    unsigned v = Self->_hashStateW;
    v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));
    Self->_hashStateW = v;
    value = v;
  }

  value &= markOopDesc::hash_mask;
  if (value == 0) value = 0xBAD;
  assert(value != markOopDesc::no_hash, "invariant");
  return value;
}
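
// The thread-local scheme above is a Marsaglia xorshift-style generator. A
// minimal standalone sketch (state names mirror _hashStateX.._hashStateW;
// their seeding elsewhere in the VM is an assumption here):
//
//   static unsigned xorshift128(unsigned& x, unsigned& y,
//                               unsigned& z, unsigned& w) {
//     unsigned t = x;
//     t ^= (t << 11);                          // perturb the oldest word
//     x = y; y = z; z = w;                     // age the state
//     w = (w ^ (w >> 19)) ^ (t ^ (t >> 8));    // combine into the new output
//     return w;
//   }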

intptr_t ObjectSynchronizer::FastHashCode(Thread * Self, oop obj) {
  if (EnableValhalla && obj->klass()->is_value()) {
    // Tooling is expected to override hashCode for value types; just don't crash.
    if (log_is_enabled(Debug, monitorinflation)) {
      ResourceMark rm;
      log_debug(monitorinflation)("FastHashCode for value type: %s", obj->klass()->external_name());
    }
    return obj->klass()->java_mirror()->identity_hash();
  }
  if (UseBiasedLocking) {
    // NOTE: many places throughout the JVM do not expect a safepoint
    // to be taken here, in particular most operations on perm gen
    // objects. However, we only ever bias Java instances and all of
    // the call sites of identity_hash that might revoke biases have
    // been checked to make sure they can handle a safepoint. The
    // added check of the bias pattern is to avoid useless calls to
    // thread-local storage.
    if (obj->mark()->has_bias_pattern()) {
      // Handle for oop obj in case of STW safepoint
      Handle hobj(Self, obj);
      // Relaxing assertion for bug 6320749.
      assert(Universe::verify_in_progress() ||
             !SafepointSynchronize::is_at_safepoint(),
             "biases should not be seen by VM thread here");
      BiasedLocking::revoke_and_rebias(hobj, false, JavaThread::current());
      obj = hobj();
      assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
    }
  }
  // ... (code elided in this excerpt) ...
  assert(mark->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i((address)mark));
  hash = mark->hash();
  if (hash == 0) {
    hash = get_next_hash(Self, obj);
    temp = mark->copy_set_hash(hash);  // merge hash code into header
    assert(temp->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i((address)temp));
    test = Atomic::cmpxchg(temp, monitor->header_addr(), mark);
    if (test != mark) {
      // The only update to the header in the monitor (outside GC)
      // is to install the hash code. If anyone adds a new usage of
      // the displaced header, please update this code.
      hash = test->hash();
      assert(test->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i((address)test));
      assert(hash != 0, "Trivial unexpected object/monitor header usage.");
    }
  }
  // We finally get the hash.
  return hash;
}


bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* thread,
                                                   Handle h_obj) {
  if (EnableValhalla && h_obj->mark()->is_always_locked()) {
    return false;
  }
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(h_obj, false, thread);
    assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  assert(thread == JavaThread::current(), "Can only be called on current thread");
  oop obj = h_obj();

  markOop mark = ReadStableMark(obj);

  // Uncontended case, header points to stack
  if (mark->has_locker()) {
    return thread->is_lock_owned((address)mark->locker());
  }
  // Contended case, header points to ObjectMonitor (tagged pointer)
  if (mark->has_monitor()) {
    ObjectMonitor* monitor = mark->monitor();
    return monitor->is_entered(thread) != 0;
  }
  // Unlocked case, header in place
  // ... (code elided in this excerpt) ...
}

// Fast path code shared by multiple functions
void ObjectSynchronizer::inflate_helper(oop obj) {
  markOop mark = obj->mark();
  if (mark->has_monitor()) {
    assert(ObjectSynchronizer::verify_objmon_isinpool(mark->monitor()), "monitor is invalid");
    assert(mark->monitor()->header()->is_neutral(), "monitor must record a good object header");
    return;
  }
  inflate(Thread::current(), obj, inflate_cause_vm_internal);
}

ObjectMonitor* ObjectSynchronizer::inflate(Thread * Self,
                                           oop object,
                                           const InflateCause cause) {
  // Inflate mutates the heap ...
  // Relaxing assertion for bug 6320749.
  assert(Universe::verify_in_progress() ||
         !SafepointSynchronize::is_at_safepoint(), "invariant");

  if (EnableValhalla) {
    guarantee(!object->klass()->is_value(), "Attempt to inflate value type");
  }

  EventJavaMonitorInflate event;

  for (;;) {
    const markOop mark = object->mark();
    assert(!mark->has_bias_pattern(), "invariant");

    // The mark can be in one of the following states:
    // *  Inflated     - just return
    // *  Stack-locked - coerce it to inflated
    // *  INFLATING    - busy wait for conversion to complete
    // *  Neutral      - aggressively inflate the object.
    // *  BIASED       - Illegal. We should never see this.

    // CASE: inflated
    if (mark->has_monitor()) {
      ObjectMonitor * inf = mark->monitor();
      markOop dmw = inf->header();
      assert(dmw->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i((address)dmw));
      assert(oopDesc::equals((oop) inf->object(), object), "invariant");