221 //
222 // * See also http://blogs.sun.com/dave
223
224
// C-heap allocation for ObjectMonitor instances, tagged with the
// mtInternal NMT category. Declared throw() so allocation failure is
// not reported via a C++ exception -- presumably AllocateHeap handles
// the out-of-memory path itself; confirm against allocation.hpp.
225 void* ObjectMonitor::operator new (size_t size) throw() {
226 return AllocateHeap(size, mtInternal);
227 }
// Array form simply delegates to the scalar operator new above.
228 void* ObjectMonitor::operator new[] (size_t size) throw() {
229 return operator new (size);
230 }
// Releases storage obtained from AllocateHeap in operator new above.
231 void ObjectMonitor::operator delete(void* p) {
232 FreeHeap(p);
233 }
// Array form delegates to the scalar operator delete above.
234 void ObjectMonitor::operator delete[] (void *p) {
235 operator delete(p);
236 }
237
238 // -----------------------------------------------------------------------------
239 // Enter support
240
241 bool ObjectMonitor::enter(TRAPS) {
242 // The following code is ordered to check the most common cases first
243 // and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors.
244 Thread * const Self = THREAD;
245
246 void * cur = Atomic::cmpxchg(Self, &_owner, (void*)NULL);
247 if (cur == NULL) {
248 // Either ASSERT _recursions == 0 or explicitly set _recursions = 0.
249 assert(_recursions == 0, "invariant");
250 assert(_owner == Self, "invariant");
251 return true;
252 }
253
254 if (cur == Self) {
255 // TODO-FIXME: check for integer overflow! BUGID 6557169.
256 _recursions++;
257 return true;
258 }
259
260 if (Self->is_lock_owned ((address)cur)) {
261 assert(_recursions == 0, "internal state error");
262 _recursions = 1;
263 // Commute owner from a thread-specific on-stack BasicLockObject address to
264 // a full-fledged "Thread *".
265 _owner = Self;
266 return true;
267 }
268
269 // We've encountered genuine contention.
270 assert(Self->_Stalled == 0, "invariant");
271 Self->_Stalled = intptr_t(this);
272
273 // Try one round of spinning *before* enqueueing Self
274 // and before going through the awkward and expensive state
275 // transitions. The following spin is strictly optional ...
276 // Note that if we acquire the monitor from an initial spin
277 // we forgo posting JVMTI events and firing DTRACE probes.
278 if (TrySpin(Self) > 0) {
279 assert(_owner == Self, "must be Self: owner=" INTPTR_FORMAT, p2i(_owner));
280 assert(_recursions == 0, "must be 0: recursions=" INTPTR_FORMAT,
281 _recursions);
282 assert(((oop)object())->mark() == markOopDesc::encode(this),
283 "object mark must match encoded this: mark=" INTPTR_FORMAT
284 ", encoded this=" INTPTR_FORMAT, p2i(((oop)object())->mark()),
285 p2i(markOopDesc::encode(this)));
286 Self->_Stalled = 0;
287 return true;
288 }
289
290 assert(_owner != Self, "invariant");
291 assert(_succ != Self, "invariant");
292 assert(Self->is_Java_thread(), "invariant");
293 JavaThread * jt = (JavaThread *) Self;
294 assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
295 assert(jt->thread_state() != _thread_blocked, "invariant");
296 assert(AsyncDeflateIdleMonitors || this->object() != NULL, "invariant");
297 assert(AsyncDeflateIdleMonitors || _contentions >= 0, "invariant");
298
299 // Prevent deflation. See ObjectSynchronizer::deflate_monitor() and is_busy().
300 // Ensure the object-monitor relationship remains stable while there's contention.
301 const jint contentions = Atomic::add(1, &_contentions);
302 if (contentions <= 0 && _owner == DEFLATER_MARKER) {
303 // Async deflation is in progress. Attempt to restore the
304 // header/dmw to the object's header so that we only retry once
305 // if the deflater thread happens to be slow.
306 const oop obj = (oop) object();
307 install_displaced_markword_in_object(obj);
308 Self->_Stalled = 0;
309 return false; // Caller should retry. Never mind about _contentions as this monitor has been deflated.
310 }
311 // The deflater thread will not deflate this monitor and the monitor is contended, continue.
312
313 JFR_ONLY(JfrConditionalFlushWithStacktrace<EventJavaMonitorEnter> flush(jt);)
314 EventJavaMonitorEnter event;
315 if (event.should_commit()) {
316 event.set_monitorClass(((oop)this->object())->klass());
317 event.set_address((uintptr_t)(this->object_addr()));
318 }
319
320 { // Change java thread status to indicate blocked on monitor enter.
321 JavaThreadBlockedOnMonitorEnterState jtbmes(jt, this);
322
323 Self->set_current_pending_monitor(this);
324
325 DTRACE_MONITOR_PROBE(contended__enter, this, object(), jt);
326 if (JvmtiExport::should_post_monitor_contended_enter()) {
327 JvmtiExport::post_monitor_contended_enter(jt, this);
328
329 // The current thread does not yet own the monitor and does not
330 // yet appear on any queues that would get it made the successor.
331 // This means that the JVMTI_EVENT_MONITOR_CONTENDED_ENTER event
353 //
354 _recursions = 0;
355 _succ = NULL;
356 exit(false, Self);
357
358 jt->java_suspend_self();
359 }
360 Self->set_current_pending_monitor(NULL);
361
362 // We cleared the pending monitor info since we've just gotten past
363 // the enter-check-for-suspend dance and we now own the monitor free
364 // and clear, i.e., it is no longer pending. The ThreadBlockInVM
365 // destructor can go to a safepoint at the end of this block. If we
366 // do a thread dump during that safepoint, then this thread will show
367 // as having "-locked" the monitor, but the OS and java.lang.Thread
368 // states will still report that the thread is blocked trying to
369 // acquire it.
370 }
371
372 Atomic::dec(&_contentions);
373 assert(AsyncDeflateIdleMonitors || _contentions >= 0, "invariant");
374 Self->_Stalled = 0;
375
376 // Must either set _recursions = 0 or ASSERT _recursions == 0.
377 assert(_recursions == 0, "invariant");
378 assert(_owner == Self, "invariant");
379 assert(_succ != Self, "invariant");
380 assert(((oop)(object()))->mark() == markOopDesc::encode(this), "invariant");
381
382 // The thread -- now the owner -- is back in vm mode.
383 // Report the glorious news via TI,DTrace and jvmstat.
384 // The probe effect is non-trivial. All the reportage occurs
385 // while we hold the monitor, increasing the length of the critical
386 // section. Amdahl's parallel speedup law comes vividly into play.
387 //
388 // Another option might be to aggregate the events (thread local or
389 // per-monitor aggregation) and defer reporting until a more opportune
390 // time -- such as next time some thread encounters contention but has
391 // yet to acquire the lock. While spinning, that thread could
392 // also increment JVMStat counters, etc.
393
394 DTRACE_MONITOR_PROBE(contended__entered, this, object(), jt);
395 if (JvmtiExport::should_post_monitor_contended_entered()) {
396 JvmtiExport::post_monitor_contended_entered(jt, this);
397
398 // The current thread already owns the monitor and is not going to
399 // call park() for the remainder of the monitor enter protocol. So
400 // it doesn't matter if the JVMTI_EVENT_MONITOR_CONTENDED_ENTERED
401 // event handler consumed an unpark() issued by the thread that
402 // just exited the monitor.
403 }
404 if (event.should_commit()) {
405 event.set_previousOwner((uintptr_t)_previous_owner_tid);
406 event.commit();
407 }
408 OM_PERFDATA_OP(ContendedLockAttempts, inc());
409 return true;
410 }
411
412 // Caveat: TryLock() is not necessarily serializing if it returns failure.
413 // Callers must compensate as needed.
414
// Non-blocking attempt to acquire the monitor (test-and-test-and-set).
// Returns:
//    1 -> Self installed itself as _owner via CAS (lock acquired)
//    0 -> _owner was observed non-NULL; no CAS was attempted
//   -1 -> _owner looked free but the CAS to claim it lost a race
// Per the caveat above, a failure return is not necessarily serializing.
415 int ObjectMonitor::TryLock(Thread * Self) {
416 void * own = _owner;
417 if (own != NULL) return 0;
418 if (Atomic::replace_if_null(Self, &_owner)) {
419 // Either guarantee _recursions == 0 or set _recursions = 0.
420 assert(_recursions == 0, "invariant");
421 assert(_owner == Self, "invariant");
422 return 1;
423 }
424 // The lock had been free momentarily, but we lost the race to the lock.
425 // Interference -- the CAS failed.
426 // We can either return -1 or retry.
427 // Retry doesn't make as much sense because the lock was just acquired.
428 return -1;
429 }
430
431 // Install the displaced mark word (dmw) of a deflating ObjectMonitor
432 // into the header of the object associated with the monitor. This
433 // idempotent method is called by a thread that is deflating a
434 // monitor and by other threads that have detected a race with the
435 // deflation process.
436 void ObjectMonitor::install_displaced_markword_in_object(const oop obj) {
437 // This function must only be called when (owner == DEFLATER_MARKER
438 // && contentions <= 0), but we can't guarantee that here because
439 // those values could change when the ObjectMonitor gets moved from
440 // the global free list to a per-thread free list.
441
442 guarantee(obj != NULL, "must be non-NULL");
443 if (object() != obj) {
444 // ObjectMonitor's object ref no longer refers to the target object
445 // so the object's header has already been restored.
446 return;
447 }
448
449 markOop dmw = header();
450 if (dmw == NULL) {
451 // ObjectMonitor's header/dmw has been cleared by the deflating
452 // thread so the object's header has already been restored.
453 return;
454 }
455
456 // A non-NULL dmw has to be either neutral (not locked and not marked)
457 // or is already participating in this restoration protocol.
458 assert(dmw->is_neutral() || (dmw->is_marked() && dmw->hash() == 0),
493 // reach this point, but only one can win.
494 obj->cas_set_mark(dmw, markOopDesc::encode(this));
495
496 // Note: It does not matter which thread restored the header/dmw
497 // into the object's header. The thread deflating the monitor just
498 // wanted the object's header restored and it is. The threads that
499 // detected a race with the deflation process also wanted the
500 // object's header restored before they retry their operation and
501 // because it is restored they will only retry once.
502
503 if (marked_dmw != NULL) {
504 // Clear _header to NULL if it is still marked_dmw so a racing
505 // install_displaced_markword_in_object() can bail out sooner.
506 Atomic::cmpxchg((markOop)NULL, &_header, marked_dmw);
507 }
508 }
509
510 #define MAX_RECHECK_INTERVAL 1000
511
512 void ObjectMonitor::EnterI(TRAPS) {
513 Thread * const Self = THREAD;
514 assert(Self->is_Java_thread(), "invariant");
515 assert(((JavaThread *) Self)->thread_state() == _thread_blocked, "invariant");
516
517 // Try the lock - TATAS
518 if (TryLock (Self) > 0) {
519 assert(_succ != Self, "invariant");
520 assert(_owner == Self, "invariant");
521 assert(_Responsible != Self, "invariant");
522 return;
523 }
524
525 if (_owner == DEFLATER_MARKER) {
526 // The deflation protocol finished the first part (setting _owner), but
527 // it failed the second part (making _contentions negative) and bailed.
528 // Because we're called from enter() we have at least one contention.
529 guarantee(_contentions > 0, "owner == DEFLATER_MARKER && contentions <= 0 "
530 "should have been handled by the caller: contentions=%d",
531 _contentions);
532 if (Atomic::cmpxchg(Self, &_owner, DEFLATER_MARKER) == DEFLATER_MARKER) {
533 // Acquired the monitor.
534 assert(_succ != Self, "invariant");
535 assert(_Responsible != Self, "invariant");
536 return;
537 }
538 }
539
540 assert(InitDone, "Unexpectedly not initialized");
541
542 // We try one round of spinning *before* enqueueing Self.
543 //
544 // If the _owner is ready but OFFPROC we could use a YieldTo()
545 // operation to donate the remainder of this thread's quantum
546 // to the owner. This has subtle but beneficial affinity
547 // effects.
548
549 if (TrySpin(Self) > 0) {
550 assert(_owner == Self, "invariant");
551 assert(_succ != Self, "invariant");
637 for (;;) {
638
639 if (TryLock(Self) > 0) break;
640 assert(_owner != Self, "invariant");
641
642 // park self
643 if (_Responsible == Self) {
644 Self->_ParkEvent->park((jlong) recheckInterval);
645 // Increase the recheckInterval, but clamp the value.
646 recheckInterval *= 8;
647 if (recheckInterval > MAX_RECHECK_INTERVAL) {
648 recheckInterval = MAX_RECHECK_INTERVAL;
649 }
650 } else {
651 Self->_ParkEvent->park();
652 }
653
654 if (TryLock(Self) > 0) break;
655
656 if (_owner == DEFLATER_MARKER) {
657 // The deflation protocol finished the first part (setting _owner), but
658 // it failed the second part (making _contentions negative) and bailed.
659 // Because we're called from enter() we have at least one contention.
660 guarantee(_contentions > 0, "owner == DEFLATER_MARKER && contentions <= 0 "
661 "should have been handled by the caller: contentions=%d",
662 _contentions);
663 if (Atomic::cmpxchg(Self, &_owner, DEFLATER_MARKER) == DEFLATER_MARKER) {
664 // Acquired the monitor.
665 break;
666 }
667 }
668
669 // The lock is still contested.
670 // Keep a tally of the # of futile wakeups.
671 // Note that the counter is not protected by a lock or updated by atomics.
672 // That is by design - we trade "lossy" counters which are exposed to
673 // races during updates for a lower probe effect.
674
675 // This PerfData object can be used in parallel with a safepoint.
676 // See the work around in PerfDataManager::destroy().
677 OM_PERFDATA_OP(FutileWakeups, inc());
678 ++nWakeups;
679
680 // Assuming this is not a spurious wakeup we'll normally find _succ == Self.
681 // We can defer clearing _succ until after the spin completes
682 // TrySpin() must tolerate being called with _succ == Self.
753 // the lock. The barrier ensures that changes to monitor meta-data and data
754 // protected by the lock will be visible before we release the lock, and
755 // therefore before some other thread (CPU) has a chance to acquire the lock.
756 // See also: http://gee.cs.oswego.edu/dl/jmm/cookbook.html.
757 //
758 // Critically, any prior STs to _succ or EntryList must be visible before
759 // the ST of null into _owner in the *subsequent* (following) corresponding
760 // monitorexit. Recall too, that in 1-0 mode monitorexit does not necessarily
761 // execute a serializing instruction.
762
763 return;
764 }
765
766 // ReenterI() is a specialized inline form of the latter half of the
767 // contended slow-path from EnterI(). We use ReenterI() only for
768 // monitor reentry in wait().
769 //
770 // In the future we should reconcile EnterI() and ReenterI().
771
772 void ObjectMonitor::ReenterI(Thread * Self, ObjectWaiter * SelfNode) {
773 assert(Self != NULL, "invariant");
774 assert(SelfNode != NULL, "invariant");
775 assert(SelfNode->_thread == Self, "invariant");
776 assert(_waiters > 0, "invariant");
777 assert(((oop)(object()))->mark() == markOopDesc::encode(this), "invariant");
778 assert(((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant");
779 JavaThread * jt = (JavaThread *) Self;
780
781 int nWakeups = 0;
782 for (;;) {
783 ObjectWaiter::TStates v = SelfNode->TState;
784 guarantee(v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant");
785 assert(_owner != Self, "invariant");
786
787 if (TryLock(Self) > 0) break;
788 if (TrySpin(Self) > 0) break;
789
790 if (_owner == DEFLATER_MARKER) {
791 // The deflation protocol finished the first part (setting _owner),
792 // but it will observe _waiters != 0 and will bail out. Because we're
793 // called from wait() we may or may not have any contentions.
794 guarantee(_contentions >= 0, "owner == DEFLATER_MARKER && contentions < 0 "
795 "should have been handled by the caller: contentions=%d",
796 _contentions);
797 if (Atomic::cmpxchg(Self, &_owner, DEFLATER_MARKER) == DEFLATER_MARKER) {
798 // Acquired the monitor.
799 break;
800 }
801 }
802
803 // State transition wrappers around park() ...
804 // ReenterI() wisely defers state transitions until
805 // it's clear we must park the thread.
806 {
807 OSThreadContendState osts(Self->osthread());
808 ThreadBlockInVM tbivm(jt);
809
810 // cleared by handle_special_suspend_equivalent_condition()
811 // or java_suspend_self()
812 jt->set_suspend_equivalent();
813 Self->_ParkEvent->park();
814
815 // were we externally suspended while we were waiting?
816 for (;;) {
1241 assert(InitDone, "Unexpectedly not initialized");
1242
1243 if (THREAD != _owner) {
1244 if (THREAD->is_lock_owned ((address)_owner)) {
1245 assert(_recursions == 0, "internal state error");
1246 _owner = THREAD; // Convert from basiclock addr to Thread addr
1247 _recursions = 0;
1248 }
1249 }
1250
1251 guarantee(Self == _owner, "complete_exit not owner");
1252 intptr_t save = _recursions; // record the old recursion count
1253 _recursions = 0; // set the recursion level to be 0
1254 exit(true, Self); // exit the monitor
1255 guarantee(_owner != Self, "invariant");
1256 return save;
1257 }
1258
1259 // reenter() enters a lock and sets recursion count
1260 // complete_exit/reenter operate as a wait without waiting
// Re-acquires the monitor (after a complete_exit()) and restores the
// previously saved recursion count. Returns false when enter() bails
// out -- in this variant enter() returns false on an async-deflation
// race -- so the caller must retry; returns true once the monitor is
// owned and _recursions has been restored.
1261 bool ObjectMonitor::reenter(intptr_t recursions, TRAPS) {
1262 Thread * const Self = THREAD;
1263 assert(Self->is_Java_thread(), "Must be Java thread!");
1264 JavaThread *jt = (JavaThread *)THREAD; // NOTE(review): 'jt' is unused in this body -- candidate for removal
1265
1266 guarantee(_owner != Self, "reenter already owner");
1267 if (!enter(THREAD)) {
1268 // Failed to enter the monitor so return for a retry.
1269 return false;
1270 }
1271 // Entered the monitor.
1272 guarantee(_recursions == 0, "reenter recursion");
1273 _recursions = recursions;
1274 return true;
1275 }
1276
1277
1278 // -----------------------------------------------------------------------------
1279 // A macro is used below because there may already be a pending
1280 // exception which should not abort the execution of the routines
1281 // which use this (which is why we don't put this into check_slow and
1282 // call it with a CHECK argument).
1283
// CHECK_OWNER(): verifies the calling thread owns this monitor before a
// Java-level monitor operation proceeds. If _owner holds an on-stack
// BasicLock address owned by THREAD, commute it to the full Thread* and
// reset _recursions; otherwise THROW IllegalMonitorStateException. Kept
// as a macro (see comment above) so THROW returns from the *caller*.
// Comments inside the macro must stay /* */ style: a // comment would
// swallow the trailing line-continuation backslash.
1284 #define CHECK_OWNER() \
1285 do { \
1286 if (THREAD != _owner) { \
1287 if (THREAD->is_lock_owned((address) _owner)) { \
1288 _owner = THREAD; /* Convert from basiclock addr to Thread addr */ \
1289 _recursions = 0; \
1290 } else { \
1291 THROW(vmSymbols::java_lang_IllegalMonitorStateException()); \
1292 } \
1293 } \
1294 } while (false)
1482 //
1483 // We redo the unpark() to ensure forward progress, i.e., we
1484 // don't want all pending threads hanging (parked) with none
1485 // entering the unlocked monitor.
1486 node._event->unpark();
1487 }
1488 }
1489
1490 if (event.should_commit()) {
1491 post_monitor_wait_event(&event, this, node._notifier_tid, millis, ret == OS_TIMEOUT);
1492 }
1493
1494 OrderAccess::fence();
1495
1496 assert(Self->_Stalled != 0, "invariant");
1497 Self->_Stalled = 0;
1498
1499 assert(_owner != Self, "invariant");
1500 ObjectWaiter::TStates v = node.TState;
1501 if (v == ObjectWaiter::TS_RUN) {
1502 const bool success = enter(Self);
1503 ADIM_guarantee(success, "enter signaled for a retry, but monitor should not have been deflated as waiters > 0");
1504 } else {
1505 guarantee(v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant");
1506 ReenterI(Self, &node);
1507 node.wait_reenter_end(this);
1508 }
1509
1510 // Self has reacquired the lock.
1511 // Lifecycle - the node representing Self must not appear on any queues.
1512 // Node is about to go out-of-scope, but even if it were immortal we wouldn't
1513 // want residual elements associated with this thread left on any lists.
1514 guarantee(node.TState == ObjectWaiter::TS_RUN, "invariant");
1515 assert(_owner == Self, "invariant");
1516 assert(_succ != Self, "invariant");
1517 } // OSThreadWaitState()
1518
1519 jt->set_current_waiting_monitor(NULL);
1520
1521 guarantee(_recursions == 0, "invariant");
1522 _recursions = save; // restore the old recursion count
1523 _waiters--; // decrement the number of waiters
2047 }
2048 #define NEWPERFVARIABLE(n) \
2049 { \
2050 n = PerfDataManager::create_variable(SUN_RT, #n, PerfData::U_Events, \
2051 CHECK); \
2052 }
2053 NEWPERFCOUNTER(_sync_Inflations);
2054 NEWPERFCOUNTER(_sync_Deflations);
2055 NEWPERFCOUNTER(_sync_ContendedLockAttempts);
2056 NEWPERFCOUNTER(_sync_FutileWakeups);
2057 NEWPERFCOUNTER(_sync_Parks);
2058 NEWPERFCOUNTER(_sync_Notifications);
2059 NEWPERFVARIABLE(_sync_MonExtant);
2060 #undef NEWPERFCOUNTER
2061 #undef NEWPERFVARIABLE
2062 }
2063
2064 DEBUG_ONLY(InitDone = true;)
2065 }
2066
2067 // For internal use by ObjectSynchronizer::monitors_iterate().
// Wraps an already-known ObjectMonitor*, bumping its ref_count first so
// the monitor stays pinned (not async-deflated) for the handle's lifetime.
2068 ObjectMonitorHandle::ObjectMonitorHandle(ObjectMonitor * om_ptr) {
2069 om_ptr->inc_ref_count();
2070 _om_ptr = om_ptr;
2071 }
2072
// Drops the reference taken when the handle was set. _om_ptr can be NULL
// here if save_om_ptr() was never called or set_om_ptr() already cleared it.
2073 ObjectMonitorHandle::~ObjectMonitorHandle() {
2074 if (_om_ptr != NULL) {
2075 _om_ptr->dec_ref_count();
2076 _om_ptr = NULL; // defensive: handle is dying anyway
2077 }
2078 }
2079
2080 // Save the ObjectMonitor* associated with the specified markOop and
2081 // increment the ref_count. This function should only be called if
2082 // the caller has verified mark->has_monitor() == true. The object
2083 // parameter is needed to verify that ObjectMonitor* has not been
2084 // deflated and reused for another object.
2085 //
2086 // This function returns true if the ObjectMonitor* has been safely
2087 // saved. This function returns false if we have lost a race with
2088 // async deflation; the caller should retry as appropriate.
2089 //
// Pin the ObjectMonitor referenced by 'mark' in this handle.
// Increments ref_count optimistically, then (under AsyncDeflateIdleMonitors)
// re-checks for a lost race with async deflation in two ways:
//   1) owner == DEFLATER_MARKER && contentions <= 0 && ref_count <= 0
//      -> deflation won; restore the object's header and return false.
//   2) the object's mark no longer refers to this monitor
//      -> monitor was deflated and reused; return false (no restore needed).
// Returns true with _om_ptr set when the monitor was safely pinned.
2090 bool ObjectMonitorHandle::save_om_ptr(oop object, markOop mark) {
2091 guarantee(mark->has_monitor(), "sanity check: mark=" INTPTR_FORMAT,
2092 p2i(mark));
2093
2094 ObjectMonitor * om_ptr = mark->monitor();
2095 om_ptr->inc_ref_count(); // optimistic pin; races re-checked below
2096
2097 if (AsyncDeflateIdleMonitors) {
2098 // Race here if monitor is not owned! The above ref_count bump
2099 // will cause subsequent async deflation to skip it. However,
2100 // previous or concurrent async deflation is a race.
2101 if (om_ptr->_owner == DEFLATER_MARKER && om_ptr->_contentions <= 0) {
2102 // Async deflation is in progress.
2103 if (om_ptr->ref_count() <= 0) {
2104 // And our ref_count increment above lost the race to async
2105 // deflation. Attempt to restore the header/dmw to the
2106 // object's header so that we only retry once if the deflater
2107 // thread happens to be slow.
2108 om_ptr->install_displaced_markword_in_object(object);
2109 om_ptr->dec_ref_count();
2110 return false;
2111 }
2112 }
2113 // The ObjectMonitor could have been deflated and reused for
2114 // another object before we bumped the ref_count so make sure
2115 // our object still refers to this ObjectMonitor.
2116 const markOop tmp = object->mark();
2117 if (!tmp->has_monitor() || tmp->monitor() != om_ptr) {
2118 // Async deflation and reuse won the race so we have to retry.
2119 // Skip object header restoration since that's already done.
2120 om_ptr->dec_ref_count();
2121 return false;
2122 }
2123 }
2124
2125 guarantee(_om_ptr == NULL, "sanity check: _om_ptr=" INTPTR_FORMAT,
2126 p2i(_om_ptr));
2127 _om_ptr = om_ptr; // handle now owns the ref_count taken above
2128 return true;
2129 }
2130
2131 // For internal use by ObjectSynchronizer::inflate().
// Toggle the handle: called with a non-NULL om_ptr to set (takes a
// reference), or with NULL to clear (drops the reference). The two
// guarantees enforce strict set/clear alternation.
2132 void ObjectMonitorHandle::set_om_ptr(ObjectMonitor * om_ptr) {
2133 if (_om_ptr == NULL) {
2134 guarantee(om_ptr != NULL, "cannot clear an unset om_ptr");
2135 om_ptr->inc_ref_count(); // pin before publishing in the handle
2136 _om_ptr = om_ptr;
2137 } else {
2138 guarantee(om_ptr == NULL, "can only clear a set om_ptr");
2139 _om_ptr->dec_ref_count();
2140 _om_ptr = NULL;
2141 }
2142 }
|
221 //
222 // * See also http://blogs.sun.com/dave
223
224
// C-heap allocation for ObjectMonitor instances, tagged with the
// mtInternal NMT category. Declared throw() so allocation failure is
// not reported via a C++ exception -- presumably AllocateHeap handles
// the out-of-memory path itself; confirm against allocation.hpp.
225 void* ObjectMonitor::operator new (size_t size) throw() {
226 return AllocateHeap(size, mtInternal);
227 }
// Array form simply delegates to the scalar operator new above.
228 void* ObjectMonitor::operator new[] (size_t size) throw() {
229 return operator new (size);
230 }
// Releases storage obtained from AllocateHeap in operator new above.
231 void ObjectMonitor::operator delete(void* p) {
232 FreeHeap(p);
233 }
// Array form delegates to the scalar operator delete above.
234 void ObjectMonitor::operator delete[] (void *p) {
235 operator delete(p);
236 }
237
238 // -----------------------------------------------------------------------------
239 // Enter support
240
241 void ObjectMonitor::enter(TRAPS) {
242 ADIM_guarantee(_ref_count > 0, "must be positive: ref_count=%d", _ref_count);
243
244 // The following code is ordered to check the most common cases first
245 // and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors.
246 Thread * const Self = THREAD;
247
248 void * cur = Atomic::cmpxchg(Self, &_owner, (void*)NULL);
249 if (cur == NULL) {
250 // Either ASSERT _recursions == 0 or explicitly set _recursions = 0.
251 assert(_recursions == 0, "invariant");
252 assert(_owner == Self, "invariant");
253 return;
254 }
255
256 if (cur == Self) {
257 // TODO-FIXME: check for integer overflow! BUGID 6557169.
258 _recursions++;
259 return;
260 }
261
262 if (Self->is_lock_owned ((address)cur)) {
263 assert(_recursions == 0, "internal state error");
264 _recursions = 1;
265 // Commute owner from a thread-specific on-stack BasicLockObject address to
266 // a full-fledged "Thread *".
267 _owner = Self;
268 return;
269 }
270
271 // We've encountered genuine contention.
272 assert(Self->_Stalled == 0, "invariant");
273 Self->_Stalled = intptr_t(this);
274
275 // Try one round of spinning *before* enqueueing Self
276 // and before going through the awkward and expensive state
277 // transitions. The following spin is strictly optional ...
278 // Note that if we acquire the monitor from an initial spin
279 // we forgo posting JVMTI events and firing DTRACE probes.
280 if (TrySpin(Self) > 0) {
281 assert(_owner == Self, "must be Self: owner=" INTPTR_FORMAT, p2i(_owner));
282 assert(_recursions == 0, "must be 0: recursions=" INTPTR_FORMAT,
283 _recursions);
284 assert(((oop)object())->mark() == markOopDesc::encode(this),
285 "object mark must match encoded this: mark=" INTPTR_FORMAT
286 ", encoded this=" INTPTR_FORMAT, p2i(((oop)object())->mark()),
287 p2i(markOopDesc::encode(this)));
288 Self->_Stalled = 0;
289 return;
290 }
291
292 assert(_owner != Self, "invariant");
293 assert(_succ != Self, "invariant");
294 assert(Self->is_Java_thread(), "invariant");
295 JavaThread * jt = (JavaThread *) Self;
296 assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
297 assert(jt->thread_state() != _thread_blocked, "invariant");
298 assert(AsyncDeflateIdleMonitors || this->object() != NULL, "invariant");
299 assert(_contentions >= 0, "must not be negative: contentions=%d", _contentions);
300
301 // Prevent deflation. See ObjectSynchronizer::deflate_monitor(),
302 // ObjectSynchronizer::deflate_monitor_using_JT() and is_busy().
303 // Ensure the object <-> monitor relationship remains stable while
304 // there's contention.
305 Atomic::add(1, &_contentions);
306
307 JFR_ONLY(JfrConditionalFlushWithStacktrace<EventJavaMonitorEnter> flush(jt);)
308 EventJavaMonitorEnter event;
309 if (event.should_commit()) {
310 event.set_monitorClass(((oop)this->object())->klass());
311 event.set_address((uintptr_t)(this->object_addr()));
312 }
313
314 { // Change java thread status to indicate blocked on monitor enter.
315 JavaThreadBlockedOnMonitorEnterState jtbmes(jt, this);
316
317 Self->set_current_pending_monitor(this);
318
319 DTRACE_MONITOR_PROBE(contended__enter, this, object(), jt);
320 if (JvmtiExport::should_post_monitor_contended_enter()) {
321 JvmtiExport::post_monitor_contended_enter(jt, this);
322
323 // The current thread does not yet own the monitor and does not
324 // yet appear on any queues that would get it made the successor.
325 // This means that the JVMTI_EVENT_MONITOR_CONTENDED_ENTER event
347 //
348 _recursions = 0;
349 _succ = NULL;
350 exit(false, Self);
351
352 jt->java_suspend_self();
353 }
354 Self->set_current_pending_monitor(NULL);
355
356 // We cleared the pending monitor info since we've just gotten past
357 // the enter-check-for-suspend dance and we now own the monitor free
358 // and clear, i.e., it is no longer pending. The ThreadBlockInVM
359 // destructor can go to a safepoint at the end of this block. If we
360 // do a thread dump during that safepoint, then this thread will show
361 // as having "-locked" the monitor, but the OS and java.lang.Thread
362 // states will still report that the thread is blocked trying to
363 // acquire it.
364 }
365
366 Atomic::dec(&_contentions);
367 assert(_contentions >= 0, "must not be negative: contentions=%d", _contentions);
368 Self->_Stalled = 0;
369
370 // Must either set _recursions = 0 or ASSERT _recursions == 0.
371 assert(_recursions == 0, "invariant");
372 assert(_owner == Self, "invariant");
373 assert(_succ != Self, "invariant");
374 assert(((oop)(object()))->mark() == markOopDesc::encode(this), "invariant");
375
376 // The thread -- now the owner -- is back in vm mode.
377 // Report the glorious news via TI,DTrace and jvmstat.
378 // The probe effect is non-trivial. All the reportage occurs
379 // while we hold the monitor, increasing the length of the critical
380 // section. Amdahl's parallel speedup law comes vividly into play.
381 //
382 // Another option might be to aggregate the events (thread local or
383 // per-monitor aggregation) and defer reporting until a more opportune
384 // time -- such as next time some thread encounters contention but has
385 // yet to acquire the lock. While spinning, that thread could
386 // also increment JVMStat counters, etc.
387
388 DTRACE_MONITOR_PROBE(contended__entered, this, object(), jt);
389 if (JvmtiExport::should_post_monitor_contended_entered()) {
390 JvmtiExport::post_monitor_contended_entered(jt, this);
391
392 // The current thread already owns the monitor and is not going to
393 // call park() for the remainder of the monitor enter protocol. So
394 // it doesn't matter if the JVMTI_EVENT_MONITOR_CONTENDED_ENTERED
395 // event handler consumed an unpark() issued by the thread that
396 // just exited the monitor.
397 }
398 if (event.should_commit()) {
399 event.set_previousOwner((uintptr_t)_previous_owner_tid);
400 event.commit();
401 }
402 OM_PERFDATA_OP(ContendedLockAttempts, inc());
403 }
404
405 // Caveat: TryLock() is not necessarily serializing if it returns failure.
406 // Callers must compensate as needed.
407
// Non-blocking attempt to acquire the monitor (test-and-test-and-set).
// Returns:
//    1 -> Self installed itself as _owner via CAS (lock acquired)
//    0 -> _owner was observed non-NULL; no CAS was attempted
//   -1 -> _owner looked free but the CAS to claim it lost a race
// Per the caveat above, a failure return is not necessarily serializing.
408 int ObjectMonitor::TryLock(Thread * Self) {
409 void * own = _owner;
410 if (own != NULL) return 0;
411 if (Atomic::replace_if_null(Self, &_owner)) {
412 // Either guarantee _recursions == 0 or set _recursions = 0.
413 assert(_recursions == 0, "invariant");
414 assert(_owner == Self, "invariant");
415 return 1;
416 }
417 // The lock had been free momentarily, but we lost the race to the lock.
418 // Interference -- the CAS failed.
419 // We can either return -1 or retry.
420 // Retry doesn't make as much sense because the lock was just acquired.
421 return -1;
422 }
423
424 // Install the displaced mark word (dmw) of a deflating ObjectMonitor
425 // into the header of the object associated with the monitor. This
426 // idempotent method is called by a thread that is deflating a
427 // monitor and by other threads that have detected a race with the
428 // deflation process.
429 void ObjectMonitor::install_displaced_markword_in_object(const oop obj) {
430 // This function must only be called when (owner == DEFLATER_MARKER
431 // && ref_count <= 0), but we can't guarantee that here because
432 // those values could change when the ObjectMonitor gets moved from
433 // the global free list to a per-thread free list.
434
435 guarantee(obj != NULL, "must be non-NULL");
436 if (object() != obj) {
437 // ObjectMonitor's object ref no longer refers to the target object
438 // so the object's header has already been restored.
439 return;
440 }
441
442 markOop dmw = header();
443 if (dmw == NULL) {
444 // ObjectMonitor's header/dmw has been cleared by the deflating
445 // thread so the object's header has already been restored.
446 return;
447 }
448
449 // A non-NULL dmw has to be either neutral (not locked and not marked)
450 // or is already participating in this restoration protocol.
451 assert(dmw->is_neutral() || (dmw->is_marked() && dmw->hash() == 0),
486 // reach this point, but only one can win.
487 obj->cas_set_mark(dmw, markOopDesc::encode(this));
488
489 // Note: It does not matter which thread restored the header/dmw
490 // into the object's header. The thread deflating the monitor just
491 // wanted the object's header restored and it is. The threads that
492 // detected a race with the deflation process also wanted the
493 // object's header restored before they retry their operation and
494 // because it is restored they will only retry once.
495
496 if (marked_dmw != NULL) {
497 // Clear _header to NULL if it is still marked_dmw so a racing
498 // install_displaced_markword_in_object() can bail out sooner.
499 Atomic::cmpxchg((markOop)NULL, &_header, marked_dmw);
500 }
501 }
502
503 #define MAX_RECHECK_INTERVAL 1000
504
// EnterI() is the contended slow path of monitor enter: the fast-path
// CAS in enter() has already failed. The visible protocol: try the
// lock, check for a racing async deflater, spin once, then loop
// park/retry until the lock is acquired.
void ObjectMonitor::EnterI(TRAPS) {
  // A positive ref_count is required here so the monitor cannot be
  // async deflated while this thread is blocked on it.
  ADIM_guarantee(_ref_count > 0, "must be positive: ref_count=%d", _ref_count);

  Thread * const Self = THREAD;
  assert(Self->is_Java_thread(), "invariant");
  assert(((JavaThread *) Self)->thread_state() == _thread_blocked, "invariant");

  // Try the lock - TATAS
  if (TryLock (Self) > 0) {
    assert(_succ != Self, "invariant");
    assert(_owner == Self, "invariant");
    assert(_Responsible != Self, "invariant");
    return;
  }

  if (_owner == DEFLATER_MARKER) {
    // The deflation protocol finished the first part (setting owner), but
    // it failed the second part (making ref_count negative) and bailed.
    // Take ownership directly from the stalled deflater via CAS.
    if (Atomic::cmpxchg(Self, &_owner, DEFLATER_MARKER) == DEFLATER_MARKER) {
      // Acquired the monitor.
      assert(_succ != Self, "invariant");
      assert(_Responsible != Self, "invariant");
      return;
    }
  }

  assert(InitDone, "Unexpectedly not initialized");

  // We try one round of spinning *before* enqueueing Self.
  //
  // If the _owner is ready but OFFPROC we could use a YieldTo()
  // operation to donate the remainder of this thread's quantum
  // to the owner. This has subtle but beneficial affinity
  // effects.

  if (TrySpin(Self) > 0) {
    assert(_owner == Self, "invariant");
    assert(_succ != Self, "invariant");
  for (;;) {

    if (TryLock(Self) > 0) break;
    assert(_owner != Self, "invariant");

    // park self
    if (_Responsible == Self) {
      // The Responsible thread parks with a timeout so the lock is
      // periodically rechecked; the interval grows geometrically but
      // is clamped below.
      Self->_ParkEvent->park((jlong) recheckInterval);
      // Increase the recheckInterval, but clamp the value.
      recheckInterval *= 8;
      if (recheckInterval > MAX_RECHECK_INTERVAL) {
        recheckInterval = MAX_RECHECK_INTERVAL;
      }
    } else {
      Self->_ParkEvent->park();
    }

    if (TryLock(Self) > 0) break;

    if (_owner == DEFLATER_MARKER) {
      // The deflation protocol finished the first part (setting owner), but
      // it failed the second part (making ref_count negative) and bailed.
      // Take ownership directly from the stalled deflater via CAS.
      if (Atomic::cmpxchg(Self, &_owner, DEFLATER_MARKER) == DEFLATER_MARKER) {
        // Acquired the monitor.
        break;
      }
    }

    // The lock is still contested.
    // Keep a tally of the # of futile wakeups.
    // Note that the counter is not protected by a lock or updated by atomics.
    // That is by design - we trade "lossy" counters which are exposed to
    // races during updates for a lower probe effect.

    // This PerfData object can be used in parallel with a safepoint.
    // See the work around in PerfDataManager::destroy().
    OM_PERFDATA_OP(FutileWakeups, inc());
    ++nWakeups;

    // Assuming this is not a spurious wakeup we'll normally find _succ == Self.
    // We can defer clearing _succ until after the spin completes
    // TrySpin() must tolerate being called with _succ == Self.
  // the lock. The barrier ensures that changes to monitor meta-data and data
  // protected by the lock will be visible before we release the lock, and
  // therefore before some other thread (CPU) has a chance to acquire the lock.
  // See also: http://gee.cs.oswego.edu/dl/jmm/cookbook.html.
  //
  // Critically, any prior STs to _succ or EntryList must be visible before
  // the ST of null into _owner in the *subsequent* (following) corresponding
  // monitorexit. Recall too, that in 1-0 mode monitorexit does not necessarily
  // execute a serializing instruction.

  return;
}
752
753 // ReenterI() is a specialized inline form of the latter half of the
754 // contended slow-path from EnterI(). We use ReenterI() only for
755 // monitor reentry in wait().
756 //
757 // In the future we should reconcile EnterI() and ReenterI().
758
void ObjectMonitor::ReenterI(Thread * Self, ObjectWaiter * SelfNode) {
  // A positive ref_count is required here so the monitor cannot be
  // async deflated while this thread blocks trying to re-enter it.
  ADIM_guarantee(_ref_count > 0, "must be positive: ref_count=%d", _ref_count);

  assert(Self != NULL, "invariant");
  assert(SelfNode != NULL, "invariant");
  assert(SelfNode->_thread == Self, "invariant");
  assert(_waiters > 0, "invariant");
  assert(((oop)(object()))->mark() == markOopDesc::encode(this), "invariant");
  assert(((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant");
  JavaThread * jt = (JavaThread *) Self;

  int nWakeups = 0;  // lossy futile-wakeup tally, same scheme as EnterI()
  for (;;) {
    // SelfNode must still be enqueued: either on the EntryList
    // (TS_ENTER) or on the cxq (TS_CXQ).
    ObjectWaiter::TStates v = SelfNode->TState;
    guarantee(v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant");
    assert(_owner != Self, "invariant");

    if (TryLock(Self) > 0) break;
    if (TrySpin(Self) > 0) break;

    if (_owner == DEFLATER_MARKER) {
      // The deflation protocol finished the first part (setting owner), but
      // it failed the second part (making ref_count negative) and bailed.
      // Take ownership directly from the stalled deflater via CAS.
      if (Atomic::cmpxchg(Self, &_owner, DEFLATER_MARKER) == DEFLATER_MARKER) {
        // Acquired the monitor.
        break;
      }
    }

    // State transition wrappers around park() ...
    // ReenterI() wisely defers state transitions until
    // it's clear we must park the thread.
    {
      OSThreadContendState osts(Self->osthread());
      ThreadBlockInVM tbivm(jt);

      // cleared by handle_special_suspend_equivalent_condition()
      // or java_suspend_self()
      jt->set_suspend_equivalent();
      Self->_ParkEvent->park();

      // were we externally suspended while we were waiting?
      for (;;) {
  assert(InitDone, "Unexpectedly not initialized");

  if (THREAD != _owner) {
    // If _owner still holds this thread's BasicLock address (stack-lock
    // that was inflated), normalize it to the owning Thread*.
    if (THREAD->is_lock_owned ((address)_owner)) {
      assert(_recursions == 0, "internal state error");
      _owner = THREAD; // Convert from basiclock addr to Thread addr
      _recursions = 0;
    }
  }

  guarantee(Self == _owner, "complete_exit not owner");
  intptr_t save = _recursions; // record the old recursion count
  _recursions = 0; // set the recursion level to be 0
  exit(true, Self); // exit the monitor
  guarantee(_owner != Self, "invariant");
  return save; // caller hands this back to reenter() to restore the count
}
1243
1244 // reenter() enters a lock and sets recursion count
1245 // complete_exit/reenter operate as a wait without waiting
1246 void ObjectMonitor::reenter(intptr_t recursions, TRAPS) {
1247 Thread * const Self = THREAD;
1248 assert(Self->is_Java_thread(), "Must be Java thread!");
1249 JavaThread *jt = (JavaThread *)THREAD;
1250
1251 guarantee(_owner != Self, "reenter already owner");
1252 enter(THREAD);
1253 // Entered the monitor.
1254 guarantee(_recursions == 0, "reenter recursion");
1255 _recursions = recursions;
1256 }
1257
1258
1259 // -----------------------------------------------------------------------------
1260 // A macro is used below because there may already be a pending
1261 // exception which should not abort the execution of the routines
1262 // which use this (which is why we don't put this into check_slow and
1263 // call it with a CHECK argument).
1264
// CHECK_OWNER() verifies that the calling thread owns this monitor
// before an operation proceeds. If _owner holds the caller's BasicLock
// address, ownership is normalized to the Thread* and the recursion
// count is reset; otherwise a non-owner caller gets an
// IllegalMonitorStateException via THROW. THROW performs an early
// return, which is why this must be a macro expanded in the caller
// rather than a helper function.
#define CHECK_OWNER()                                                       \
  do {                                                                      \
    if (THREAD != _owner) {                                                 \
      if (THREAD->is_lock_owned((address) _owner)) {                        \
        _owner = THREAD; /* Convert from basiclock addr to Thread addr */   \
        _recursions = 0;                                                    \
      } else {                                                              \
        THROW(vmSymbols::java_lang_IllegalMonitorStateException());         \
      }                                                                     \
    }                                                                       \
  } while (false)
        //
        // We redo the unpark() to ensure forward progress, i.e., we
        // don't want all pending threads hanging (parked) with none
        // entering the unlocked monitor.
        node._event->unpark();
      }
    }

    // Post the monitor-wait event if enabled; ret == OS_TIMEOUT
    // distinguishes a timeout from a notification.
    if (event.should_commit()) {
      post_monitor_wait_event(&event, this, node._notifier_tid, millis, ret == OS_TIMEOUT);
    }

    OrderAccess::fence();

    assert(Self->_Stalled != 0, "invariant");
    Self->_Stalled = 0;

    assert(_owner != Self, "invariant");
    // Re-acquire the monitor. A node already in TS_RUN has been fully
    // dequeued, so we enter from scratch; a node still in
    // TS_ENTER/TS_CXQ is still queued and must finish the contended
    // protocol via ReenterI().
    ObjectWaiter::TStates v = node.TState;
    if (v == ObjectWaiter::TS_RUN) {
      enter(Self);
    } else {
      guarantee(v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant");
      ReenterI(Self, &node);
      node.wait_reenter_end(this);
    }

    // Self has reacquired the lock.
    // Lifecycle - the node representing Self must not appear on any queues.
    // Node is about to go out-of-scope, but even if it were immortal we wouldn't
    // want residual elements associated with this thread left on any lists.
    guarantee(node.TState == ObjectWaiter::TS_RUN, "invariant");
    assert(_owner == Self, "invariant");
    assert(_succ != Self, "invariant");
  } // OSThreadWaitState()

  jt->set_current_waiting_monitor(NULL);

  guarantee(_recursions == 0, "invariant");
  _recursions = save; // restore the old recursion count
  _waiters--; // decrement the number of waiters
  }
#define NEWPERFVARIABLE(n)                                                \
  {                                                                       \
    n = PerfDataManager::create_variable(SUN_RT, #n, PerfData::U_Events,  \
                                         CHECK);                          \
  }
    // Create the ObjectMonitor subsystem perf counters/variables under
    // the SUN_RT namespace. CHECK follows the TRAPS convention: a
    // pending exception from counter creation returns out of this
    // function immediately.
    NEWPERFCOUNTER(_sync_Inflations);
    NEWPERFCOUNTER(_sync_Deflations);
    NEWPERFCOUNTER(_sync_ContendedLockAttempts);
    NEWPERFCOUNTER(_sync_FutileWakeups);
    NEWPERFCOUNTER(_sync_Parks);
    NEWPERFCOUNTER(_sync_Notifications);
    NEWPERFVARIABLE(_sync_MonExtant);
#undef NEWPERFCOUNTER
#undef NEWPERFVARIABLE
  }

  DEBUG_ONLY(InitDone = true;) // debug-only flag: one-time init completed
}
2046
2047 // For internal use by ObjectSynchronizer::monitors_iterate().
2048 ObjectMonitorHandle::ObjectMonitorHandle(ObjectMonitor * om_ptr) {
2049 om_ptr->inc_ref_count();
2050 _om_ptr = om_ptr;
2051 }
2052
2053 ObjectMonitorHandle::~ObjectMonitorHandle() {
2054 if (_om_ptr != NULL) {
2055 _om_ptr->dec_ref_count();
2056 _om_ptr = NULL;
2057 }
2058 }
2059
2060 // Save the ObjectMonitor* associated with the specified markOop and
2061 // increment the ref_count. This function should only be called if
2062 // the caller has verified mark->has_monitor() == true. The object
2063 // parameter is needed to verify that ObjectMonitor* has not been
2064 // deflated and reused for another object.
2065 //
2066 // This function returns true if the ObjectMonitor* has been safely
2067 // saved. This function returns false if we have lost a race with
2068 // async deflation; the caller should retry as appropriate.
2069 //
bool ObjectMonitorHandle::save_om_ptr(oop object, markOop mark) {
  guarantee(mark->has_monitor(), "sanity check: mark=" INTPTR_FORMAT,
            p2i(mark));

  ObjectMonitor * om_ptr = mark->monitor();
  // Bump the ref_count *before* validating: the bump is what blocks a
  // subsequent async deflation while we examine the monitor.
  om_ptr->inc_ref_count();

  if (AsyncDeflateIdleMonitors) {
    // Race here if monitor is not owned! The above ref_count bump
    // will cause subsequent async deflation to skip it. However,
    // previous or concurrent async deflation is a race.
    if (om_ptr->_owner == DEFLATER_MARKER && om_ptr->ref_count() <= 0) {
      // Async deflation is in progress and our ref_count increment
      // above lost the race to async deflation. Attempt to restore
      // the header/dmw to the object's header so that we only retry
      // once if the deflater thread happens to be slow.
      om_ptr->install_displaced_markword_in_object(object);
      om_ptr->dec_ref_count();
      return false; // lost the race; caller should retry
    }
    // The ObjectMonitor could have been deflated and reused for
    // another object before we bumped the ref_count so make sure
    // our object still refers to this ObjectMonitor.
    const markOop tmp = object->mark();
    if (!tmp->has_monitor() || tmp->monitor() != om_ptr) {
      // Async deflation and reuse won the race so we have to retry.
      // Skip object header restoration since that's already done.
      om_ptr->dec_ref_count();
      return false; // lost the race; caller should retry
    }
  }

  // Success: this handle now holds the monitor with its ref_count raised.
  ADIM_guarantee(_om_ptr == NULL, "sanity check: _om_ptr=" INTPTR_FORMAT,
                 p2i(_om_ptr));
  _om_ptr = om_ptr;
  return true;
}
2107
2108 // For internal use by ObjectSynchronizer::inflate().
2109 void ObjectMonitorHandle::set_om_ptr(ObjectMonitor * om_ptr) {
2110 if (_om_ptr == NULL) {
2111 ADIM_guarantee(om_ptr != NULL, "cannot clear an unset om_ptr");
2112 om_ptr->inc_ref_count();
2113 _om_ptr = om_ptr;
2114 } else {
2115 ADIM_guarantee(om_ptr == NULL, "can only clear a set om_ptr");
2116 _om_ptr->dec_ref_count();
2117 _om_ptr = NULL;
2118 }
2119 }
|