143 // The fast-path is designed to handle frequently arising cases in an efficient
144 // manner and is just a degenerate "optimistic" variant of the slow-path.
145 // returns true -- to indicate the call was satisfied.
146 // returns false -- to indicate the call needs the services of the slow-path.
147 // A no-loitering ordinance is in effect for code in the quick_* family
148 // operators: safepoints or indefinite blocking (blocking that might span a
149 // safepoint) are forbidden. Generally the thread_state() is _in_Java upon
150 // entry.
151 //
152 // Consider: An interesting optimization is to have the JIT recognize the
153 // following common idiom:
154 // synchronized (someobj) { .... ; notify(); }
155 // That is, we find a notify() or notifyAll() call that immediately precedes
156 // the monitorexit operation. In that case the JIT could fuse the operations
157 // into a single notifyAndExit() runtime primitive.
158
159 bool ObjectSynchronizer::quick_notify(oopDesc * obj, Thread * self, bool all) {
160 assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
161 assert(self->is_Java_thread(), "invariant");
162 assert(((JavaThread *) self)->thread_state() == _thread_in_Java, "invariant");
163 assert(obj == oopDesc::bs()->resolve_and_maybe_copy_oop(obj), "expect to-space copy");
164 No_Safepoint_Verifier nsv;
165 if (obj == NULL) return false; // slow-path for invalid obj
166 const markOop mark = obj->mark();
167
168 if (mark->has_locker() && self->is_lock_owned((address)mark->locker())) {
169 // Degenerate notify
170 // stack-locked by caller so by definition the implied waitset is empty.
171 return true;
172 }
173
174 if (mark->has_monitor()) {
175 ObjectMonitor * const mon = mark->monitor();
176 assert(mon->object() == obj, "invariant");
177 if (mon->owner() != self) return false; // slow-path for IMS (IllegalMonitorStateException)
178
179 if (mon->first_waiter() != NULL) {
180 // We have one or more waiters. Since this is an inflated monitor
181 // that we own, we can transfer one or more threads from the waitset
182 // to the entrylist here and now, avoiding the slow-path.
183 if (all) {
194 }
195 return true;
196 }
197
198 // biased locking and any other IMS exception states take the slow-path
199 return false;
200 }
201
202
203 // The LockNode emitted directly at the synchronization site would have
204 // been too big if it were to have included support for the cases of inflated
205 // recursive enter and exit, so they go here instead.
206 // Note that we can't safely call AsyncPrintJavaStack() from within
207 // quick_enter() as our thread state remains _in_Java.
208
209 bool ObjectSynchronizer::quick_enter(oop obj, Thread * Self,
210 BasicLock * Lock) {
211 assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
212 assert(Self->is_Java_thread(), "invariant");
213 assert(((JavaThread *) Self)->thread_state() == _thread_in_Java, "invariant");
214 assert(obj == oopDesc::bs()->resolve_and_maybe_copy_oop(obj), "expect to-space copy");
215 No_Safepoint_Verifier nsv;
216 if (obj == NULL) return false; // Need to throw NPE
217 const markOop mark = obj->mark();
218
219 if (mark->has_monitor()) {
220 ObjectMonitor * const m = mark->monitor();
221 assert(m->object() == obj, "invariant");
222 Thread * const owner = (Thread *) m->_owner;
223
224 // Lock contention and Transactional Lock Elision (TLE) diagnostics
225 // and observability
226 // Case: light contention possibly amenable to TLE
227 // Case: TLE inimical operations such as nested/recursive synchronization
228
229 if (owner == Self) {
230 m->_recursions++;
231 return true;
232 }
233
234 if (owner == NULL &&
241
242 // Note that we could inflate in quick_enter.
243 // This is likely a useful optimization.
244 // Critically, in quick_enter() we must not:
245 // -- perform bias revocation, or
246 // -- block indefinitely, or
247 // -- reach a safepoint
248
249 return false; // revert to slow-path
250 }
251
252 // -----------------------------------------------------------------------------
253 // Fast Monitor Enter/Exit
254 // This is the fast monitor enter. The interpreter and compiler use
255 // assembly copies of this code. Make sure to update that code
256 // if the following function is changed. The implementation is
257 // extremely sensitive to race conditions. Be careful.
258
259 void ObjectSynchronizer::fast_enter(Handle obj, BasicLock* lock,
260 bool attempt_rebias, TRAPS) {
261 assert(obj() == oopDesc::bs()->resolve_and_maybe_copy_oop(obj()), "expect to-space copy");
262 if (UseBiasedLocking) {
263 if (!SafepointSynchronize::is_at_safepoint()) {
264 BiasedLocking::Condition cond = BiasedLocking::revoke_and_rebias(obj, attempt_rebias, THREAD);
265 if (cond == BiasedLocking::BIAS_REVOKED_AND_REBIASED) {
266 return;
267 }
268 } else {
269 assert(!attempt_rebias, "can not rebias toward VM thread");
270 BiasedLocking::revoke_at_safepoint(obj);
271 }
272 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
273 }
274
275 slow_enter(obj, lock, THREAD);
276 }
277
278 void ObjectSynchronizer::fast_exit(oop object, BasicLock* lock, TRAPS) {
279 assert(!object->mark()->has_bias_pattern(), "should not see bias pattern here");
280 // If the displaced header is NULL, the previous enter was a recursive enter and this exit is a no-op.
281 assert(object == oopDesc::bs()->resolve_and_maybe_copy_oop(object), "expect to-space copy");
282
283 markOop dhw = lock->displaced_header();
284 markOop mark;
285 if (dhw == NULL) {
286 // Recursive stack-lock.
287 // Diagnostics -- Could be: stack-locked, inflating, inflated.
288 mark = object->mark();
289 assert(!mark->is_neutral(), "invariant");
290 if (mark->has_locker() && mark != markOopDesc::INFLATING()) {
291 assert(THREAD->is_lock_owned((address)mark->locker()), "invariant");
292 }
293 if (mark->has_monitor()) {
294 ObjectMonitor * m = mark->monitor();
295 assert(((oop)(m->object()))->mark() == mark, "invariant");
296 assert(m->is_entered(THREAD), "invariant");
297 }
298 return;
299 }
300
301 mark = object->mark();
302
303 // If the object is stack-locked by the current thread, try to
304 // swing the displaced header from the box back to the mark.
305 if (mark == (markOop) lock) {
306 assert(dhw->is_neutral(), "invariant");
307 if ((markOop) Atomic::cmpxchg_ptr (dhw, object->mark_addr(), mark) == mark) {
308 TEVENT(fast_exit: release stacklock);
309 return;
310 }
311 }
312
313 ObjectSynchronizer::inflate(THREAD, object)->exit(true, THREAD);
314 }
315
316 // -----------------------------------------------------------------------------
317 // Interpreter/Compiler Slow Case
318 // This routine is used to handle interpreter/compiler slow case
319 // We don't need to use the fast path here, because it must have
320 // already failed in the interpreter/compiler code.
321 void ObjectSynchronizer::slow_enter(Handle obj, BasicLock* lock, TRAPS) {
322 assert(obj() == oopDesc::bs()->resolve_and_maybe_copy_oop(obj()), "expect to-space copy");
323 markOop mark = obj->mark();
324 assert(!mark->has_bias_pattern(), "should not see bias pattern here");
325
326 if (mark->is_neutral()) {
327 // Anticipate successful CAS -- the ST of the displaced mark must
328 // be visible <= the ST performed by the CAS.
329 lock->set_displaced_header(mark);
330 if (mark == (markOop) Atomic::cmpxchg_ptr(lock, obj()->mark_addr(), mark)) {
331 TEVENT(slow_enter: release stacklock);
332 return;
333 }
334 // Fall through to inflate() ...
335 } else if (mark->has_locker() &&
336 THREAD->is_lock_owned((address)mark->locker())) {
337 assert(lock != mark->locker(), "must not re-lock the same lock");
338 assert(lock != (BasicLock*)obj->mark(), "don't relock with same BasicLock");
339 lock->set_displaced_header(NULL);
340 return;
341 }
342
353 // failed in the interpreter/compiler code. Simply using the heavy-
354 // weight monitor should be OK, unless someone finds otherwise.
355 void ObjectSynchronizer::slow_exit(oop object, BasicLock* lock, TRAPS) {
356 fast_exit(object, lock, THREAD);
357 }
358
359 // -----------------------------------------------------------------------------
360 // Class Loader support to work around deadlocks on the class loader lock objects
361 // Also used by GC
362 // complete_exit()/reenter() are used to wait on a nested lock
363 // i.e. to give up an outer lock completely and then re-enter
364 // Used when holding nested locks - lock acquisition order: lock1 then lock2
365 // 1) complete_exit lock1 - saving recursion count
366 // 2) wait on lock2
367 // 3) when notified on lock2, unlock lock2
368 // 4) reenter lock1 with original recursion count
369 // 5) lock lock2   (a usage sketch follows reenter() below)
370 // NOTE: must use heavy weight monitor to handle complete_exit/reenter()
371 intptr_t ObjectSynchronizer::complete_exit(Handle obj, TRAPS) {
372 TEVENT(complete_exit);
373 assert(obj() == oopDesc::bs()->resolve_and_maybe_copy_oop(obj()), "expect to-space copy");
374 if (UseBiasedLocking) {
375 BiasedLocking::revoke_and_rebias(obj, false, THREAD);
376 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
377 }
378
379 ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj());
380
381 return monitor->complete_exit(THREAD);
382 }
383
384 // NOTE: must use heavy weight monitor to handle complete_exit/reenter()
385 void ObjectSynchronizer::reenter(Handle obj, intptr_t recursion, TRAPS) {
386 TEVENT(reenter);
387 assert(obj() == oopDesc::bs()->resolve_and_maybe_copy_oop(obj()), "expect to-space copy");
388 if (UseBiasedLocking) {
389 BiasedLocking::revoke_and_rebias(obj, false, THREAD);
390 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
391 }
392
393 ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj());
394
395 monitor->reenter(recursion, THREAD);
396 }
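
// A hedged sketch of the nested-lock protocol documented above complete_exit().
// Everything here is illustrative: the handle names 'outer' and 'inner' are
// assumptions, and steps 3 and 5 use whatever enter/exit mechanism the real
// caller already has for 'inner'. Kept inside a comment so it is never compiled.
//
//   // Precondition: this thread owns 'outer', then 'inner' (acquired in that order).
//   intptr_t rec = ObjectSynchronizer::complete_exit(outer, THREAD); // 1) give up outer, saving its recursion count
//   ObjectSynchronizer::wait(inner, 0, THREAD);                      // 2) wait on inner
//   /* 3) once notified on inner, unlock inner */
//   ObjectSynchronizer::reenter(outer, rec, THREAD);                 // 4) retake outer at the saved recursion count
//   /* 5) lock inner again, restoring the lock1-then-lock2 order */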
397 // -----------------------------------------------------------------------------
398 // JNI locks on java objects
399 // NOTE: must use heavy weight monitor to handle jni monitor enter
400 void ObjectSynchronizer::jni_enter(Handle obj, TRAPS) {
401 // the current locking is from JNI instead of Java code
402 TEVENT(jni_enter);
403 assert(obj() == oopDesc::bs()->resolve_and_maybe_copy_oop(obj()), "expect to-space copy");
404 if (UseBiasedLocking) {
405 BiasedLocking::revoke_and_rebias(obj, false, THREAD);
406 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
407 }
408 THREAD->set_current_pending_monitor_is_from_java(false);
409 ObjectSynchronizer::inflate(THREAD, obj())->enter(THREAD);
410 THREAD->set_current_pending_monitor_is_from_java(true);
411 }
412
413 // NOTE: must use heavy weight monitor to handle jni monitor exit
414 void ObjectSynchronizer::jni_exit(oop obj, Thread* THREAD) {
415 TEVENT(jni_exit);
416 assert(obj == oopDesc::bs()->resolve_and_maybe_copy_oop(obj), "expect to-space copy");
417 if (UseBiasedLocking) {
418 Handle h_obj(THREAD, obj);
419 BiasedLocking::revoke_and_rebias(h_obj, false, THREAD);
420 obj = h_obj();
421 }
422 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
423
424 ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj);
425 // If this thread has locked the object, exit the monitor. Note: can't use
426 // monitor->check(CHECK); must exit even if an exception is pending.
427 if (monitor->check(THREAD)) {
428 monitor->exit(true, THREAD);
429 }
430 }
431
432 // -----------------------------------------------------------------------------
433 // Internal VM locks on java objects
434 // standard constructor, allows locking failures
435 ObjectLocker::ObjectLocker(Handle obj, Thread* thread, bool doLock) {
436 _dolock = doLock;
439 _obj = obj;
440
441 if (_dolock) {
442 TEVENT(ObjectLocker);
443
444 ObjectSynchronizer::fast_enter(_obj, &_lock, false, _thread);
445 }
446 }
447
448 ObjectLocker::~ObjectLocker() {
449 if (_dolock) {
450 ObjectSynchronizer::fast_exit(_obj(), &_lock, _thread);
451 }
452 }
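
// A hedged usage sketch for ObjectLocker: RAII-style locking of a Java object
// from VM-internal code. The handle name 'h_obj' and the surrounding scope are
// assumptions for illustration only.
//
//   {
//     ObjectLocker ol(h_obj, THREAD, true);  // fast_enter() in the constructor
//     // ... operate on the object while it is locked ...
//   }                                        // fast_exit() in the destructor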
453
454
455 // -----------------------------------------------------------------------------
456 // Wait/Notify/NotifyAll
457 // NOTE: must use heavy weight monitor to handle wait()
458 int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
459 assert(obj() == oopDesc::bs()->resolve_and_maybe_copy_oop(obj()), "expect to-space copy");
460 if (UseBiasedLocking) {
461 BiasedLocking::revoke_and_rebias(obj, false, THREAD);
462 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
463 }
464 if (millis < 0) {
465 TEVENT(wait - throw IAX);
466 THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
467 }
468 ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj());
469 DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), THREAD, millis);
470 monitor->wait(millis, true, THREAD);
471
472 // This dummy call is in place to get around dtrace bug 6254741. Once
473 // that's fixed we can uncomment the following line, remove the call
474 // and change this function back into a "void" func.
475 // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
476 return dtrace_waited_probe(monitor, obj, THREAD);
477 }
478
479 void ObjectSynchronizer::waitUninterruptibly(Handle obj, jlong millis, TRAPS) {
480 assert(obj() == oopDesc::bs()->resolve_and_maybe_copy_oop(obj()), "expect to-space copy");
481 if (UseBiasedLocking) {
482 BiasedLocking::revoke_and_rebias(obj, false, THREAD);
483 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
484 }
485 if (millis < 0) {
486 TEVENT(wait - throw IAX);
487 THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
488 }
489 ObjectSynchronizer::inflate(THREAD, obj())->wait(millis, false, THREAD);
490 }
491
492 void ObjectSynchronizer::notify(Handle obj, TRAPS) {
493 assert(obj() == oopDesc::bs()->resolve_and_maybe_copy_oop(obj()), "expect to-space copy");
494 if (UseBiasedLocking) {
495 BiasedLocking::revoke_and_rebias(obj, false, THREAD);
496 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
497 }
498
499 markOop mark = obj->mark();
500 if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
501 return;
502 }
503 ObjectSynchronizer::inflate(THREAD, obj())->notify(THREAD);
504 }
505
506 // NOTE: see the comment in notify()
507 void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
508 assert(obj() == oopDesc::bs()->resolve_and_maybe_copy_oop(obj()), "expect to-space copy");
509 if (UseBiasedLocking) {
510 BiasedLocking::revoke_and_rebias(obj, false, THREAD);
511 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
512 }
513
514 markOop mark = obj->mark();
515 if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
516 return;
517 }
518 ObjectSynchronizer::inflate(THREAD, obj())->notifyAll(THREAD);
519 }
520
521 // -----------------------------------------------------------------------------
522 // Hash Code handling
523 //
524 // Performance concern:
525 // OrderAccess::storestore() calls release() which at one time stored 0
526 // into the global volatile OrderAccess::dummy variable. This store was
527 // unnecessary for correctness. Many threads storing into a common location
528 // causes considerable cache migration or "sloshing" on large SMP systems.
666 // likely make this the default in future releases.
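// The lines below are a thread-local xorshift-style PRNG step: the four
// per-thread state words are rotated (X <- Y <- Z <- W) and a new word is
// produced by xor-ing shifted copies of the oldest and newest state, so each
// thread generates hash bits without writing to any shared location.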
667 unsigned t = Self->_hashStateX;
668 t ^= (t << 11);
669 Self->_hashStateX = Self->_hashStateY;
670 Self->_hashStateY = Self->_hashStateZ;
671 Self->_hashStateZ = Self->_hashStateW;
672 unsigned v = Self->_hashStateW;
673 v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));
674 Self->_hashStateW = v;
675 value = v;
676 }
677
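// Never publish a hash of zero: a zero hash field in the mark word means
// "no hash installed" (markOopDesc::no_hash), so a zero result is remapped.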
678 value &= markOopDesc::hash_mask;
679 if (value == 0) value = 0xBAD;
680 assert(value != markOopDesc::no_hash, "invariant");
681 TEVENT(hashCode: GENERATE);
682 return value;
683 }
684
685 intptr_t ObjectSynchronizer::FastHashCode(Thread * Self, oop obj) {
686 assert(obj == oopDesc::bs()->resolve_and_maybe_copy_oop(obj), "expect to-space copy");
687 if (UseBiasedLocking) {
688 // NOTE: many places throughout the JVM do not expect a safepoint
689 // to be taken here, in particular most operations on perm gen
690 // objects. However, we only ever bias Java instances and all of
691 // the call sites of identity_hash that might revoke biases have
692 // been checked to make sure they can handle a safepoint. The
693 // added check of the bias pattern is to avoid useless calls to
694 // thread-local storage.
695 if (obj->mark()->has_bias_pattern()) {
696 // Handle for oop obj in case of STW safepoint
697 Handle hobj(Self, obj);
698 // Relaxing assertion for bug 6320749.
699 assert(Universe::verify_in_progress() ||
700 !SafepointSynchronize::is_at_safepoint(),
701 "biases should not be seen by VM thread here");
702 BiasedLocking::revoke_and_rebias(hobj, false, JavaThread::current());
703 obj = hobj();
704 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
705 }
706 }
780 // is install the hash code. If someone adds a new usage of
781 // the displaced header, please update this code.
782 hash = test->hash();
783 assert(test->is_neutral(), "invariant");
784 assert(hash != 0, "Trivial unexpected object/monitor header usage.");
785 }
786 }
787 // We finally get the hash
788 return hash;
789 }
790
791 // Deprecated -- use FastHashCode() instead.
792
793 intptr_t ObjectSynchronizer::identity_hash_value_for(Handle obj) {
794 return FastHashCode(Thread::current(), obj());
795 }
796
797
798 bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* thread,
799 Handle h_obj) {
800 assert(h_obj() == oopDesc::bs()->resolve_and_maybe_copy_oop(h_obj()), "expect to-space copy");
801 if (UseBiasedLocking) {
802 BiasedLocking::revoke_and_rebias(h_obj, false, thread);
803 assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now");
804 }
805
806 assert(thread == JavaThread::current(), "Can only be called on current thread");
807 oop obj = h_obj();
808
809 markOop mark = ReadStableMark(obj);
810
811 // Uncontended case, header points to stack
812 if (mark->has_locker()) {
813 return thread->is_lock_owned((address)mark->locker());
814 }
815 // Contended case, header points to ObjectMonitor (tagged pointer)
816 if (mark->has_monitor()) {
817 ObjectMonitor* monitor = mark->monitor();
818 return monitor->is_entered(thread) != 0;
819 }
820 // Unlocked case, header in place
853 owner_self : owner_other;
854 }
855
856 // CASE: inflated. Mark (tagged pointer) points to an objectMonitor.
857 // The Object:ObjectMonitor relationship is stable as long as we're
858 // not at a safepoint.
859 if (mark->has_monitor()) {
860 void * owner = mark->monitor()->_owner;
861 if (owner == NULL) return owner_none;
862 return (owner == self ||
863 self->is_lock_owned((address)owner)) ? owner_self : owner_other;
864 }
865
866 // CASE: neutral
867 assert(mark->is_neutral(), "sanity check");
868 return owner_none; // it's unlocked
869 }
870
871 // FIXME: jvmti should call this
872 JavaThread* ObjectSynchronizer::get_lock_owner(Handle h_obj, bool doLock) {
873 assert(h_obj() == oopDesc::bs()->resolve_and_maybe_copy_oop(h_obj()), "expect to-space copy");
874 if (UseBiasedLocking) {
875 if (SafepointSynchronize::is_at_safepoint()) {
876 BiasedLocking::revoke_at_safepoint(h_obj);
877 } else {
878 BiasedLocking::revoke_and_rebias(h_obj, false, JavaThread::current());
879 }
880 assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now");
881 }
882
883 oop obj = h_obj();
884 address owner = NULL;
885
886 markOop mark = ReadStableMark(obj);
887
888 // Uncontended case, header points to stack
889 if (mark->has_locker()) {
890 owner = (address) mark->locker();
891 }
892
893 // Contended case, header points to ObjectMonitor (tagged pointer)
1275
1276 Thread::muxAcquire(&gListLock, "omFlush");
1277 if (tail != NULL) {
1278 tail->FreeNext = gFreeList;
1279 gFreeList = list;
1280 gMonitorFreeCount += tally;
1281 }
1282
1283 if (inUseTail != NULL) {
1284 inUseTail->FreeNext = gOmInUseList;
1285 gOmInUseList = inUseList;
1286 gOmInUseCount += inUseTally;
1287 }
1288
1289 Thread::muxRelease(&gListLock);
1290 TEVENT(omFlush);
1291 }
1292
1293 // Fast path code shared by multiple functions
1294 ObjectMonitor* ObjectSynchronizer::inflate_helper(oop obj) {
1295 assert(obj == oopDesc::bs()->resolve_and_maybe_copy_oop(obj), "expect to-space copy");
1296 markOop mark = obj->mark();
1297 if (mark->has_monitor()) {
1298 assert(ObjectSynchronizer::verify_objmon_isinpool(mark->monitor()), "monitor is invalid");
1299 assert(mark->monitor()->header()->is_neutral(), "monitor must record a good object header");
1300 return mark->monitor();
1301 }
1302 return ObjectSynchronizer::inflate(Thread::current(), obj);
1303 }
1304
1305
1306 ObjectMonitor * NOINLINE ObjectSynchronizer::inflate(Thread * Self,
1307 oop object) {
1308 // Inflate mutates the heap ...
1309 // Relaxing assertion for bug 6320749.
1310 assert(object == oopDesc::bs()->resolve_and_maybe_copy_oop(object), "expect to-space copy");
1311 assert(Universe::verify_in_progress() ||
1312 !SafepointSynchronize::is_at_safepoint(), "invariant");
1313
1314 for (;;) {
1315 const markOop mark = object->mark();
1316 assert(!mark->has_bias_pattern(), "invariant");
1317
1318 // The mark can be in one of the following states:
1319 // * Inflated - just return
1320 // * Stack-locked - coerce it to inflated
1321 // * INFLATING - busy wait for conversion to complete
1322 // * Neutral - aggressively inflate the object.
1323 // * BIASED - Illegal. We should never see this
1324
1325 // CASE: inflated
1326 if (mark->has_monitor()) {
1327 ObjectMonitor * inf = mark->monitor();
1328 assert(inf->header()->is_neutral(), "invariant");
1329 assert((oop) inf->object() == object, "invariant");
1330 assert(ObjectSynchronizer::verify_objmon_isinpool(inf), "monitor is invalid");
1331 return inf;
1332 }
1333
1334 // CASE: inflation in progress - inflating over a stack-lock.
1335 // Some other thread is converting from stack-locked to inflated.
1336 // Only that thread can complete inflation -- other threads must wait.
1337 // The INFLATING value is transient.
1338 // Currently, we spin/yield/park and poll the markword, waiting for inflation to finish.
1339 // We could always eliminate polling by parking the thread on some auxiliary list.
1340 if (mark == markOopDesc::INFLATING()) {
1341 TEVENT(Inflate: spin while INFLATING);
1342 ReadStableMark(object);
1343 continue;
1344 }
1345
1346 // CASE: stack-locked
1347 // Could be stack-locked either by this thread or by some other thread.
1348 //
1349 // Note that we allocate the objectmonitor speculatively, _before_ attempting
1515 // An alternative could have used a single global in-use list. The
1516 // downside would have been the additional cost of acquiring the global list lock
1517 // for every omAlloc().
1518 //
1519 // Perversely, the heap size -- and thus the STW safepoint rate --
1520 // typically drives the scavenge rate. Large heaps can mean infrequent GC,
1521 // which in turn can mean large(r) numbers of objectmonitors in circulation.
1522 // This is an unfortunate aspect of this design.
1523
1524 enum ManifestConstants {
1525 ClearResponsibleAtSTW = 0
1526 };
1527
1528 // Deflate a single monitor if not in-use
1529 // Return true if deflated, false if in-use
1530 bool ObjectSynchronizer::deflate_monitor(ObjectMonitor* mid, oop obj,
1531 ObjectMonitor** freeHeadp,
1532 ObjectMonitor** freeTailp) {
1533 bool deflated;
1534 // Normal case ... The monitor is associated with obj.
1535 assert(obj == oopDesc::bs()->resolve_and_maybe_copy_oop(obj), "expect to-space copy");
1536 guarantee(obj->mark() == markOopDesc::encode(mid), "invariant");
1537 guarantee(mid == obj->mark()->monitor(), "invariant");
1538 guarantee(mid->header()->is_neutral(), "invariant");
1539
1540 if (mid->is_busy()) {
1541 if (ClearResponsibleAtSTW) mid->_Responsible = NULL;
1542 deflated = false;
1543 } else {
1544 // Deflate the monitor if it is no longer being used
1545 // It's idle - scavenge and return to the global free list
1546 // plain old deflation ...
1547 TEVENT(deflate_idle_monitors - scavenge1);
1548 if (TraceMonitorInflation) {
1549 if (obj->is_instance()) {
1550 ResourceMark rm;
1551 tty->print_cr("Deflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
1552 (void *) obj, (intptr_t) obj->mark(), obj->klass()->external_name());
1553 }
1554 }
1555
1801 if ((sizeof(SharedGlobals) - offset_hcSequence) < cache_line_size) {
1802 tty->print_cr("WARNING: the SharedGlobals.hcSequence field is closer "
1803 "to the struct end than a cache line which permits false "
1804 "sharing.");
1805 (*warning_cnt_ptr)++;
1806 }
1807 }
1808 }
1809
1810 #ifndef PRODUCT
1811
812 // Verify all monitors in the monitor cache; the verification is weak.
1813 void ObjectSynchronizer::verify() {
1814 PaddedEnd<ObjectMonitor> * block = (PaddedEnd<ObjectMonitor> *)gBlockList;
1815 ObjectMonitor* mid;
1816 while (block) {
1817 assert(block->object() == CHAINMARKER, "must be a block header");
1818 for (int i = 1; i < _BLOCKSIZE; i++) {
1819 mid = (ObjectMonitor *)(block + i);
1820 oop object = (oop) mid->object();
1821
1822 if (object != NULL) {
1823 mid->verify();
1824 }
1825 }
1826 block = (PaddedEnd<ObjectMonitor> *) block->FreeNext;
1827 }
1828 }
1829
1830 // Check if monitor belongs to the monitor cache
1831 // The list is grow-only so it's *relatively* safe to traverse
1832 // the list of extant blocks without taking a lock.
1833
1834 int ObjectSynchronizer::verify_objmon_isinpool(ObjectMonitor *monitor) {
1835 PaddedEnd<ObjectMonitor> * block = (PaddedEnd<ObjectMonitor> *)gBlockList;
1836
1837 while (block) {
1838 assert(block->object() == CHAINMARKER, "must be a block header");
1839 if (monitor > (ObjectMonitor *)&block[0] &&
1840 monitor < (ObjectMonitor *)&block[_BLOCKSIZE]) {
1841 address mon = (address) monitor;