221 //
222 // * See also http://blogs.sun.com/dave
223
224
225 void* ObjectMonitor::operator new (size_t size) throw() {
226 return AllocateHeap(size, mtInternal);
227 }
228 void* ObjectMonitor::operator new[] (size_t size) throw() {
229 return operator new (size);
230 }
231 void ObjectMonitor::operator delete(void* p) {
232 FreeHeap(p);
233 }
234 void ObjectMonitor::operator delete[] (void *p) {
235 operator delete(p);
236 }
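
// ObjectMonitors live in the C-heap rather than the Java heap, so the
// allocation operators above route through AllocateHeap()/FreeHeap().
// The mtInternal tag attributes this memory to the VM-internal bucket
// under Native Memory Tracking (visible via jcmd VM.native_memory).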
237
238 // -----------------------------------------------------------------------------
239 // Enter support
240
241 void ObjectMonitor::enter(TRAPS) {
242 // The following code is ordered to check the most common cases first
243 // and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors.
244 Thread * const Self = THREAD;
245
246 void * cur = Atomic::cmpxchg(Self, &_owner, (void*)NULL);
247 if (cur == NULL) {
248 // Either ASSERT _recursions == 0 or explicitly set _recursions = 0.
249 assert(_recursions == 0, "invariant");
250 assert(_owner == Self, "invariant");
251 return;
252 }
253
254 if (cur == Self) {
255 // TODO-FIXME: check for integer overflow! BUGID 6557169.
256 _recursions++;
257 return;
258 }
259
260 if (Self->is_lock_owned ((address)cur)) {
261 assert(_recursions == 0, "internal state error");
262 _recursions = 1;
263 // Commute owner from a thread-specific on-stack BasicLock address to
264 // a full-fledged "Thread *".
265 _owner = Self;
266 return;
267 }
268
269 // We've encountered genuine contention.
270 assert(Self->_Stalled == 0, "invariant");
271 Self->_Stalled = intptr_t(this);
272
273 // Try one round of spinning *before* enqueueing Self
274 // and before going through the awkward and expensive state
275 // transitions. The following spin is strictly optional ...
276 // Note that if we acquire the monitor from an initial spin
277 // we forgo posting JVMTI events and firing DTRACE probes.
278 if (TrySpin(Self) > 0) {
279 assert(_owner == Self, "invariant");
280 assert(_recursions == 0, "invariant");
281 assert(((oop)(object()))->mark() == markOopDesc::encode(this), "invariant");
282 Self->_Stalled = 0;
283 return;
284 }
285
286 assert(_owner != Self, "invariant");
287 assert(_succ != Self, "invariant");
288 assert(Self->is_Java_thread(), "invariant");
289 JavaThread * jt = (JavaThread *) Self;
290 assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
291 assert(jt->thread_state() != _thread_blocked, "invariant");
292 assert(this->object() != NULL, "invariant");
293 assert(_count >= 0, "invariant");
294
295 // Prevent deflation at STW-time. See deflate_idle_monitors() and is_busy().
296 // Ensure the object-monitor relationship remains stable while there's contention.
297 Atomic::inc(&_count);
298
299 JFR_ONLY(JfrConditionalFlushWithStacktrace<EventJavaMonitorEnter> flush(jt);)
300 EventJavaMonitorEnter event;
301 if (event.should_commit()) {
302 event.set_monitorClass(((oop)this->object())->klass());
303 event.set_address((uintptr_t)(this->object_addr()));
304 }
305
306 { // Change java thread status to indicate blocked on monitor enter.
307 JavaThreadBlockedOnMonitorEnterState jtbmes(jt, this);
308
309 Self->set_current_pending_monitor(this);
310
311 DTRACE_MONITOR_PROBE(contended__enter, this, object(), jt);
312 if (JvmtiExport::should_post_monitor_contended_enter()) {
313 JvmtiExport::post_monitor_contended_enter(jt, this);
314
315 // The current thread does not yet own the monitor and does not
316 // yet appear on any queues that could make it the successor.
317 // This means that the JVMTI_EVENT_MONITOR_CONTENDED_ENTER event
339 //
340 _recursions = 0;
341 _succ = NULL;
342 exit(false, Self);
343
344 jt->java_suspend_self();
345 }
346 Self->set_current_pending_monitor(NULL);
347
348 // We cleared the pending monitor info since we've just gotten past
349 // the enter-check-for-suspend dance and we now own the monitor free
350 // and clear, i.e., it is no longer pending. The ThreadBlockInVM
351 // destructor can go to a safepoint at the end of this block. If we
352 // do a thread dump during that safepoint, then this thread will show
353 // as having "-locked" the monitor, but the OS and java.lang.Thread
354 // states will still report that the thread is blocked trying to
355 // acquire it.
356 }
357
358 Atomic::dec(&_count);
359 assert(_count >= 0, "invariant");
360 Self->_Stalled = 0;
361
362 // Must either set _recursions = 0 or ASSERT _recursions == 0.
363 assert(_recursions == 0, "invariant");
364 assert(_owner == Self, "invariant");
365 assert(_succ != Self, "invariant");
366 assert(((oop)(object()))->mark() == markOopDesc::encode(this), "invariant");
367
368 // The thread -- now the owner -- is back in vm mode.
369 // Report the glorious news via JVMTI, DTrace, and jvmstat.
370 // The probe effect is non-trivial. All the reportage occurs
371 // while we hold the monitor, increasing the length of the critical
372 // section. Amdahl's parallel speedup law comes vividly into play.
373 //
374 // Another option might be to aggregate the events (thread local or
375 // per-monitor aggregation) and defer reporting until a more opportune
376 // time -- such as next time some thread encounters contention but has
377 // yet to acquire the lock. While spinning, that thread could also
378 // increment JVMStat counters, etc.
379
380 DTRACE_MONITOR_PROBE(contended__entered, this, object(), jt);
381 if (JvmtiExport::should_post_monitor_contended_entered()) {
382 JvmtiExport::post_monitor_contended_entered(jt, this);
383
384 // The current thread already owns the monitor and is not going to
385 // call park() for the remainder of the monitor enter protocol. So
386 // it doesn't matter if the JVMTI_EVENT_MONITOR_CONTENDED_ENTERED
387 // event handler consumed an unpark() issued by the thread that
388 // just exited the monitor.
389 }
390 if (event.should_commit()) {
391 event.set_previousOwner((uintptr_t)_previous_owner_tid);
392 event.commit();
393 }
394 OM_PERFDATA_OP(ContendedLockAttempts, inc());
395 }
396
397 // Caveat: TryLock() is not necessarily serializing if it returns failure.
398 // Callers must compensate as needed.
399
400 int ObjectMonitor::TryLock(Thread * Self) {
401 void * own = _owner;
402 if (own != NULL) return 0;
403 if (Atomic::replace_if_null(Self, &_owner)) {
404 // Either guarantee _recursions == 0 or set _recursions = 0.
405 assert(_recursions == 0, "invariant");
406 assert(_owner == Self, "invariant");
407 return 1;
408 }
409 // The lock had been free momentarily, but we lost the race to the lock.
410 // Interference -- the CAS failed.
411 // We can either return -1 or retry.
412 // Retrying makes little sense because another thread just acquired the lock.
413 return -1;
414 }
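
// TryLock() above has the classic test-and-test-and-set (TATAS) shape: the
// plain load of _owner screens out the common contended case, so spinners
// mostly read a shared cache line instead of forcing an RTS->RTO upgrade on
// every attempt. A minimal generic sketch of the idea, with std::atomic used
// purely for illustration (HotSpot uses its own Atomic:: wrappers):
//
//   std::atomic<void*> lock_owner{nullptr};
//
//   bool tatas_try_lock(void* self) {
//     if (lock_owner.load(std::memory_order_relaxed) != nullptr) {
//       return false;                 // test: cheap shared read while busy
//     }
//     void* expected = nullptr;       // test-and-set: CAS only when the
//     return lock_owner.compare_exchange_strong(expected, self);  // lock looks free
//   }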
415
416 #define MAX_RECHECK_INTERVAL 1000
417
418 void ObjectMonitor::EnterI(TRAPS) {
419 Thread * const Self = THREAD;
420 assert(Self->is_Java_thread(), "invariant");
421 assert(((JavaThread *) Self)->thread_state() == _thread_blocked, "invariant");
422
423 // Try the lock - TATAS
424 if (TryLock (Self) > 0) {
425 assert(_succ != Self, "invariant");
426 assert(_owner == Self, "invariant");
427 assert(_Responsible != Self, "invariant");
428 return;
429 }
430
431 assert(InitDone, "Unexpectedly not initialized");
432
433 // We try one round of spinning *before* enqueueing Self.
434 //
435 // If the _owner is ready but OFFPROC (descheduled) we could use a YieldTo()
436 // operation to donate the remainder of this thread's quantum
437 // to the owner. This has subtle but beneficial affinity
438 // effects.
439
440 if (TrySpin(Self) > 0) {
441 assert(_owner == Self, "invariant");
442 assert(_succ != Self, "invariant");
443 assert(_Responsible != Self, "invariant");
444 return;
445 }
446
447 // The Spin failed -- Enqueue and park the thread ...
448 assert(_succ != Self, "invariant");
449 assert(_owner != Self, "invariant");
450 assert(_Responsible != Self, "invariant");
527
528 for (;;) {
529
530 if (TryLock(Self) > 0) break;
531 assert(_owner != Self, "invariant");
532
533 // park self
534 if (_Responsible == Self) {
535 Self->_ParkEvent->park((jlong) recheckInterval);
536 // Increase the recheckInterval, but clamp the value.
537 recheckInterval *= 8;
538 if (recheckInterval > MAX_RECHECK_INTERVAL) {
539 recheckInterval = MAX_RECHECK_INTERVAL;
540 }
541 } else {
542 Self->_ParkEvent->park();
543 }
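
// The timed park above is a safety net: the single thread designated
// _Responsible periodically rechecks the lock instead of parking
// indefinitely, so a lost or already-consumed unpark() cannot strand the
// entire entry queue. The 8x growth with a clamp keeps the recheck
// overhead low when the lock stays contended for a long time.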
544
545 if (TryLock(Self) > 0) break;
546
547 // The lock is still contested.
548 // Keep a tally of the # of futile wakeups.
549 // Note that the counter is not protected by a lock or updated by atomics.
550 // That is by design - we trade "lossy" counters which are exposed to
551 // races during updates for a lower probe effect.
552
553 // This PerfData object can be used in parallel with a safepoint.
554 // See the work around in PerfDataManager::destroy().
555 OM_PERFDATA_OP(FutileWakeups, inc());
556 ++nWakeups;
557
558 // Assuming this is not a spurious wakeup, we'll normally find _succ == Self.
559 // We can defer clearing _succ until after the spin completes;
560 // TrySpin() must tolerate being called with _succ == Self.
561 // Try yet another round of adaptive spinning.
562 if (TrySpin(Self) > 0) break;
563
564 // We can find that we were unpark()ed and redesignated _succ while
565 // we were spinning. That's harmless. If we iterate and call park(),
566 // park() will consume the event and return immediately and we'll
648 // In the future we should reconcile EnterI() and ReenterI().
649
650 void ObjectMonitor::ReenterI(Thread * Self, ObjectWaiter * SelfNode) {
651 assert(Self != NULL, "invariant");
652 assert(SelfNode != NULL, "invariant");
653 assert(SelfNode->_thread == Self, "invariant");
654 assert(_waiters > 0, "invariant");
655 assert(((oop)(object()))->mark() == markOopDesc::encode(this), "invariant");
656 assert(((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant");
657 JavaThread * jt = (JavaThread *) Self;
658
659 int nWakeups = 0;
660 for (;;) {
661 ObjectWaiter::TStates v = SelfNode->TState;
662 guarantee(v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant");
663 assert(_owner != Self, "invariant");
664
665 if (TryLock(Self) > 0) break;
666 if (TrySpin(Self) > 0) break;
667
668 // State transition wrappers around park() ...
669 // ReenterI() wisely defers state transitions until
670 // it's clear we must park the thread.
671 {
672 OSThreadContendState osts(Self->osthread());
673 ThreadBlockInVM tbivm(jt);
674
675 // cleared by handle_special_suspend_equivalent_condition()
676 // or java_suspend_self()
677 jt->set_suspend_equivalent();
678 Self->_ParkEvent->park();
679
680 // were we externally suspended while we were waiting?
681 for (;;) {
682 if (!ExitSuspendEquivalent(jt)) break;
683 if (_succ == Self) { _succ = NULL; OrderAccess::fence(); }
684 jt->java_suspend_self();
685 jt->set_suspend_equivalent();
686 }
687 }
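
// park() can return for reasons other than a matching unpark(), including
// spurious wakeups and external suspension. If we wake suspend-equivalent
// we self-suspend before retrying; clearing _succ first keeps the exiting
// thread's successor protocol from handing the lock to a thread that
// cannot run yet.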
1105 assert(InitDone, "Unexpectedly not initialized");
1106
1107 if (THREAD != _owner) {
1108 if (THREAD->is_lock_owned ((address)_owner)) {
1109 assert(_recursions == 0, "internal state error");
1110 _owner = THREAD; // Convert from basiclock addr to Thread addr
1111 _recursions = 0;
1112 }
1113 }
1114
1115 guarantee(Self == _owner, "complete_exit not owner");
1116 intptr_t save = _recursions; // record the old recursion count
1117 _recursions = 0; // set the recursion level to be 0
1118 exit(true, Self); // exit the monitor
1119 guarantee(_owner != Self, "invariant");
1120 return save;
1121 }
1122
1123 // reenter() enters a lock and sets the recursion count.
1124 // complete_exit()/reenter() together operate like a wait() without the waiting.
1125 void ObjectMonitor::reenter(intptr_t recursions, TRAPS) {
1126 Thread * const Self = THREAD;
1127 assert(Self->is_Java_thread(), "Must be Java thread!");
1128 JavaThread *jt = (JavaThread *)THREAD;
1129
1130 guarantee(_owner != Self, "reenter already owner");
1131 enter(THREAD); // enter the monitor
1132 guarantee(_recursions == 0, "reenter recursion");
1133 _recursions = recursions;
1134 return;
1135 }
1136
1137
1138 // -----------------------------------------------------------------------------
1139 // A macro is used below because there may already be a pending
1140 // exception which should not abort the execution of the routines
1141 // which use this (which is why we don't put this into check_slow and
1142 // call it with a CHECK argument).
1143
1144 #define CHECK_OWNER() \
1145 do { \
1146 if (THREAD != _owner) { \
1147 if (THREAD->is_lock_owned((address) _owner)) { \
1148 _owner = THREAD; /* Convert from basiclock addr to Thread addr */ \
1149 _recursions = 0; \
1150 } else { \
1151 THROW(vmSymbols::java_lang_IllegalMonitorStateException()); \
1152 } \
1153 } \
1154 } while (false)
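
// A condensed usage sketch (assumed caller shape, matching the wait() and
// notify() entry points): the macro either repairs a stack-lock owner in
// place or throws, and because THROW returns from the enclosing function,
// the pending exception propagates without needing a CHECK argument.
//
//   void ObjectMonitor::notify(TRAPS) {
//     CHECK_OWNER();  // throws IllegalMonitorStateException and returns
//                     // from notify() if THREAD does not own the monitor
//     ...
//   }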
1342 //
1343 // We redo the unpark() to ensure forward progress, i.e., we
1344 // don't want all pending threads hanging (parked) with none
1345 // entering the unlocked monitor.
1346 node._event->unpark();
1347 }
1348 }
1349
1350 if (event.should_commit()) {
1351 post_monitor_wait_event(&event, this, node._notifier_tid, millis, ret == OS_TIMEOUT);
1352 }
1353
1354 OrderAccess::fence();
1355
1356 assert(Self->_Stalled != 0, "invariant");
1357 Self->_Stalled = 0;
1358
1359 assert(_owner != Self, "invariant");
1360 ObjectWaiter::TStates v = node.TState;
1361 if (v == ObjectWaiter::TS_RUN) {
1362 enter(Self);
1363 } else {
1364 guarantee(v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant");
1365 ReenterI(Self, &node);
1366 node.wait_reenter_end(this);
1367 }
1368
1369 // Self has reacquired the lock.
1370 // Lifecycle - the node representing Self must not appear on any queues.
1371 // Node is about to go out-of-scope, but even if it were immortal we wouldn't
1372 // want residual elements associated with this thread left on any lists.
1373 guarantee(node.TState == ObjectWaiter::TS_RUN, "invariant");
1374 assert(_owner == Self, "invariant");
1375 assert(_succ != Self, "invariant");
1376 } // OSThreadWaitState()
1377
1378 jt->set_current_waiting_monitor(NULL);
1379
1380 guarantee(_recursions == 0, "invariant");
1381 _recursions = save; // restore the old recursion count
1382 _waiters--; // decrement the number of waiters
1904 n = PerfDataManager::create_counter(SUN_RT, #n, PerfData::U_Events, \
1905 CHECK); \
1906 }
1907 #define NEWPERFVARIABLE(n) \
1908 { \
1909 n = PerfDataManager::create_variable(SUN_RT, #n, PerfData::U_Events, \
1910 CHECK); \
1911 }
1912 NEWPERFCOUNTER(_sync_Inflations);
1913 NEWPERFCOUNTER(_sync_Deflations);
1914 NEWPERFCOUNTER(_sync_ContendedLockAttempts);
1915 NEWPERFCOUNTER(_sync_FutileWakeups);
1916 NEWPERFCOUNTER(_sync_Parks);
1917 NEWPERFCOUNTER(_sync_Notifications);
1918 NEWPERFVARIABLE(_sync_MonExtant);
1919 #undef NEWPERFCOUNTER
1920 #undef NEWPERFVARIABLE
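// The #n stringizing above registers each entry under the SUN_RT
// ("sun.rt.") namespace using the variable's own name, so for example
// _sync_Inflations should surface as sun.rt._sync_Inflations in jvmstat
// output.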
1921 }
1922
1923 DEBUG_ONLY(InitDone = true;)
1924 }
|
221 //
222 // * See also http://blogs.sun.com/dave
223
224
225 void* ObjectMonitor::operator new (size_t size) throw() {
226 return AllocateHeap(size, mtInternal);
227 }
228 void* ObjectMonitor::operator new[] (size_t size) throw() {
229 return operator new (size);
230 }
231 void ObjectMonitor::operator delete(void* p) {
232 FreeHeap(p);
233 }
234 void ObjectMonitor::operator delete[] (void *p) {
235 operator delete(p);
236 }
237
238 // -----------------------------------------------------------------------------
239 // Enter support
240
241 bool ObjectMonitor::enter(TRAPS) {
242 // The following code is ordered to check the most common cases first
243 // and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors.
244 Thread * const Self = THREAD;
245
246 void * cur = Atomic::cmpxchg(Self, &_owner, (void*)NULL);
247 if (cur == NULL) {
248 // Either ASSERT _recursions == 0 or explicitly set _recursions = 0.
249 assert(_recursions == 0, "invariant");
250 assert(_owner == Self, "invariant");
251 return true;
252 }
253
254 if (cur == Self) {
255 // TODO-FIXME: check for integer overflow! BUGID 6557169.
256 _recursions++;
257 return true;
258 }
259
260 if (Self->is_lock_owned ((address)cur)) {
261 assert(_recursions == 0, "internal state error");
262 _recursions = 1;
263 // Commute owner from a thread-specific on-stack BasicLock address to
264 // a full-fledged "Thread *".
265 _owner = Self;
266 return true;
267 }
268
269 // We've encountered genuine contention.
270 assert(Self->_Stalled == 0, "invariant");
271 Self->_Stalled = intptr_t(this);
272
273 // Try one round of spinning *before* enqueueing Self
274 // and before going through the awkward and expensive state
275 // transitions. The following spin is strictly optional ...
276 // Note that if we acquire the monitor from an initial spin
277 // we forgo posting JVMTI events and firing DTRACE probes.
278 if (TrySpin(Self) > 0) {
279 assert(_owner == Self, "invariant");
280 assert(_recursions == 0, "invariant");
281 assert(((oop)(object()))->mark() == markOopDesc::encode(this), "invariant");
282 Self->_Stalled = 0;
283 return true;
284 }
285
286 assert(_owner != Self, "invariant");
287 assert(_succ != Self, "invariant");
288 assert(Self->is_Java_thread(), "invariant");
289 JavaThread * jt = (JavaThread *) Self;
290 assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
291 assert(jt->thread_state() != _thread_blocked, "invariant");
292 assert(AsyncDeflateIdleMonitors || this->object() != NULL, "invariant");
293 assert(AsyncDeflateIdleMonitors || _count >= 0, "invariant");
294
295 // Prevent deflation. See ObjectSynchronizer::deflate_monitor() and is_busy().
296 // Ensure the object-monitor relationship remains stable while there's contention.
297 const jint count = Atomic::add(1, &_count);
298 if (count <= 0 && _owner == DEFLATER_MARKER) {
299 // Async deflation in progress. Help deflater thread install
300 // the mark word (in case deflater thread is slow).
301 install_displaced_markword_in_object();
302 Self->_Stalled = 0;
303 return false; // Caller should retry. No need to fix up _count; this monitor has been deflated.
304 }
305 // The deflater thread will not deflate this monitor and the monitor is contended, continue.
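
// The handshake with the deflater has two halves (condensed summary; the
// deflater's half lives in the code path referenced above): the deflater
// first CASes _owner from NULL to DEFLATER_MARKER and then tries to drive
// _count negative. A contender that increments _count and still observes
// count <= 0 arrived after deflation committed, so it backs off above and
// retries against the object's restored mark word.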
306
307 JFR_ONLY(JfrConditionalFlushWithStacktrace<EventJavaMonitorEnter> flush(jt);)
308 EventJavaMonitorEnter event;
309 if (event.should_commit()) {
310 event.set_monitorClass(((oop)this->object())->klass());
311 event.set_address((uintptr_t)(this->object_addr()));
312 }
313
314 { // Change java thread status to indicate blocked on monitor enter.
315 JavaThreadBlockedOnMonitorEnterState jtbmes(jt, this);
316
317 Self->set_current_pending_monitor(this);
318
319 DTRACE_MONITOR_PROBE(contended__enter, this, object(), jt);
320 if (JvmtiExport::should_post_monitor_contended_enter()) {
321 JvmtiExport::post_monitor_contended_enter(jt, this);
322
323 // The current thread does not yet own the monitor and does not
324 // yet appear on any queues that could make it the successor.
325 // This means that the JVMTI_EVENT_MONITOR_CONTENDED_ENTER event
347 //
348 _recursions = 0;
349 _succ = NULL;
350 exit(false, Self);
351
352 jt->java_suspend_self();
353 }
354 Self->set_current_pending_monitor(NULL);
355
356 // We cleared the pending monitor info since we've just gotten past
357 // the enter-check-for-suspend dance and we now own the monitor free
358 // and clear, i.e., it is no longer pending. The ThreadBlockInVM
359 // destructor can go to a safepoint at the end of this block. If we
360 // do a thread dump during that safepoint, then this thread will show
361 // as having "-locked" the monitor, but the OS and java.lang.Thread
362 // states will still report that the thread is blocked trying to
363 // acquire it.
364 }
365
366 Atomic::dec(&_count);
367 assert(AsyncDeflateIdleMonitors || _count >= 0, "invariant");
368 Self->_Stalled = 0;
369
370 // Must either set _recursions = 0 or ASSERT _recursions == 0.
371 assert(_recursions == 0, "invariant");
372 assert(_owner == Self, "invariant");
373 assert(_succ != Self, "invariant");
374 assert(((oop)(object()))->mark() == markOopDesc::encode(this), "invariant");
375
376 // The thread -- now the owner -- is back in vm mode.
377 // Report the glorious news via JVMTI, DTrace, and jvmstat.
378 // The probe effect is non-trivial. All the reportage occurs
379 // while we hold the monitor, increasing the length of the critical
380 // section. Amdahl's parallel speedup law comes vividly into play.
381 //
382 // Another option might be to aggregate the events (thread local or
383 // per-monitor aggregation) and defer reporting until a more opportune
384 // time -- such as next time some thread encounters contention but has
385 // yet to acquire the lock. While spinning, that thread could also
386 // increment JVMStat counters, etc.
387
388 DTRACE_MONITOR_PROBE(contended__entered, this, object(), jt);
389 if (JvmtiExport::should_post_monitor_contended_entered()) {
390 JvmtiExport::post_monitor_contended_entered(jt, this);
391
392 // The current thread already owns the monitor and is not going to
393 // call park() for the remainder of the monitor enter protocol. So
394 // it doesn't matter if the JVMTI_EVENT_MONITOR_CONTENDED_ENTERED
395 // event handler consumed an unpark() issued by the thread that
396 // just exited the monitor.
397 }
398 if (event.should_commit()) {
399 event.set_previousOwner((uintptr_t)_previous_owner_tid);
400 event.commit();
401 }
402 OM_PERFDATA_OP(ContendedLockAttempts, inc());
403 return true;
404 }
405
406 // Caveat: TryLock() is not necessarily serializing if it returns failure.
407 // Callers must compensate as needed.
408
409 int ObjectMonitor::TryLock(Thread * Self) {
410 void * own = _owner;
411 if (own != NULL) return 0;
412 if (Atomic::replace_if_null(Self, &_owner)) {
413 // Either guarantee _recursions == 0 or set _recursions = 0.
414 assert(_recursions == 0, "invariant");
415 assert(_owner == Self, "invariant");
416 return 1;
417 }
418 // The lock had been free momentarily, but we lost the race to the lock.
419 // Interference -- the CAS failed.
420 // We can either return -1 or retry.
421 // Retrying makes little sense because another thread just acquired the lock.
422 return -1;
423 }
424
425 // Install the displaced markword of a deflated monitor into the object
426 // associated with the monitor.
427 // This method is idempotent and is executed by both mutators wanting to
428 // acquire a monitor for an object and the thread deflating monitors.
429 // A mutator trying to install a hash in the monitor's _header field can
430 // also run in parallel to this method.
431 void ObjectMonitor::install_displaced_markword_in_object() {
432 markOop dmw = header();
433 if (dmw == NULL) {
434 // The thread deflating monitors has won the race so we
435 // have nothing to do.
436 return;
437 }
438
439 // A non-NULL dmw has to be either neutral or is participating in
440 // this restoration protocol.
441 assert(dmw->is_neutral() || (dmw->is_marked() && dmw->hash() == 0),
442 "failed precondition: is_neutral=%d, is_marked=%d, hash="
443 INTPTR_FORMAT, dmw->is_neutral(), dmw->is_marked(), dmw->hash());
444
445 if (!dmw->is_marked() && dmw->hash() == 0) {
446 // This dmw is neutral and has not yet started the restoration
447 // protocol so we mark a copy of the dmw to begin the protocol.
448 markOop marked_dmw = dmw->set_marked();
449 assert(marked_dmw->is_marked() && marked_dmw->hash() == 0,
450 "sanity_check: is_marked=%d, hash=" INTPTR_FORMAT,
451 marked_dmw->is_marked(), marked_dmw->hash());
452
453 // There can be three different racers trying to update the _header
454 // field and the return dmw value will tell us what cleanup needs
455 // to be done (if any) after the race winner:
456 // 1) A mutator trying to install a hash in the object.
457 // Note: That mutator is not executing this code, but it is
458 // trying to update the _header field.
459 // If winner: dmw will contain the hash and be unmarked
460 // 2a) A mutator trying to acquire the monitor via enter():
461 // If winner: dmw is marked and hash() == 0
462 // 2b) The thread deflating the monitor via deflate_monitor_using_JT():
463 // If winner: dmw is marked and hash() == 0
464 dmw = (markOop) Atomic::cmpxchg(marked_dmw, &_header, dmw);
465 }
466
467 if (dmw->is_marked()) {
468 // The dmw copy is marked which means a hash was not set by a racing
469 // thread. Clear the mark from the copy in preparation for possible
470 // restoration from this thread.
471 assert(dmw->hash() == 0, "must be 0: hash=" INTPTR_FORMAT, dmw->hash());
472 dmw = dmw->set_unmarked();
473 }
474 assert(dmw->is_neutral(), "must be a neutral markword");
475
476 oop const obj = (oop) object();
477 // Install displaced markword if object markword still points to this
478 // monitor. Both the mutator trying to enter() and the thread deflating
479 // the monitor will reach this point, but only one can win.
480 // Note: If a mutator won the cmpxchg() race above and installed a hash
481 // in _header, then the updated dmw contains that hash and we'll install
482 // it in the object's markword here.
483 obj->cas_set_mark(dmw, markOopDesc::encode(this));
484 }
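
// To summarize the races in install_displaced_markword_in_object(): after
// the cmpxchg, a dmw that is unmarked with a nonzero hash means the
// hashCode-installing mutator won, and that hash-bearing header is what
// cas_set_mark() installs. A marked dmw with hash() == 0 means the enter()
// path or the deflater owns the restoration and unmarks its private copy
// first. Either way, the final cas_set_mark() is a no-op for every racer
// except the one that still observes markOopDesc::encode(this) in the
// object's mark word.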
485
486 #define MAX_RECHECK_INTERVAL 1000
487
488 void ObjectMonitor::EnterI(TRAPS) {
489 Thread * const Self = THREAD;
490 assert(Self->is_Java_thread(), "invariant");
491 assert(((JavaThread *) Self)->thread_state() == _thread_blocked, "invariant");
492
493 // Try the lock - TATAS
494 if (TryLock (Self) > 0) {
495 assert(_succ != Self, "invariant");
496 assert(_owner == Self, "invariant");
497 assert(_Responsible != Self, "invariant");
498 return;
499 }
500
501 if (_owner == DEFLATER_MARKER) {
502 guarantee(0 < _count, "_owner == DEFLATER_MARKER && _count <= 0 should have been handled by the caller");
503 // Deflater thread tried to lock this monitor, but it failed to make _count negative and gave up.
504 // Try to acquire monitor.
505 if (Atomic::cmpxchg(Self, &_owner, DEFLATER_MARKER) == DEFLATER_MARKER) {
506 assert(_succ != Self, "invariant");
507 assert(_owner == Self, "invariant");
508 assert(_Responsible != Self, "invariant");
509 return;
510 }
511 }
512
513 assert(InitDone, "Unexpectedly not initialized");
514
515 // We try one round of spinning *before* enqueueing Self.
516 //
517 // If the _owner is ready but OFFPROC (descheduled) we could use a YieldTo()
518 // operation to donate the remainder of this thread's quantum
519 // to the owner. This has subtle but beneficial affinity
520 // effects.
521
522 if (TrySpin(Self) > 0) {
523 assert(_owner == Self, "invariant");
524 assert(_succ != Self, "invariant");
525 assert(_Responsible != Self, "invariant");
526 return;
527 }
528
529 // The Spin failed -- Enqueue and park the thread ...
530 assert(_succ != Self, "invariant");
531 assert(_owner != Self, "invariant");
532 assert(_Responsible != Self, "invariant");
609
610 for (;;) {
611
612 if (TryLock(Self) > 0) break;
613 assert(_owner != Self, "invariant");
614
615 // park self
616 if (_Responsible == Self) {
617 Self->_ParkEvent->park((jlong) recheckInterval);
618 // Increase the recheckInterval, but clamp the value.
619 recheckInterval *= 8;
620 if (recheckInterval > MAX_RECHECK_INTERVAL) {
621 recheckInterval = MAX_RECHECK_INTERVAL;
622 }
623 } else {
624 Self->_ParkEvent->park();
625 }
626
627 if (TryLock(Self) > 0) break;
628
629 if (_owner == DEFLATER_MARKER) {
630 guarantee(0 < _count, "_owner == DEFLATER_MARKER && _count <= 0 should have been handled by the caller");
631 // Deflater thread tried to lock this monitor, but it failed to make _count negative and gave up.
632 if (Atomic::cmpxchg(Self, &_owner, DEFLATER_MARKER) == DEFLATER_MARKER) {
633 // Acquired the monitor.
634 break;
635 }
636 }
637
638 // The lock is still contested.
639 // Keep a tally of the # of futile wakeups.
640 // Note that the counter is not protected by a lock or updated by atomics.
641 // That is by design - we trade "lossy" counters which are exposed to
642 // races during updates for a lower probe effect.
643
644 // This PerfData object can be used in parallel with a safepoint.
645 // See the work around in PerfDataManager::destroy().
646 OM_PERFDATA_OP(FutileWakeups, inc());
647 ++nWakeups;
648
649 // Assuming this is not a spurious wakeup, we'll normally find _succ == Self.
650 // We can defer clearing _succ until after the spin completes;
651 // TrySpin() must tolerate being called with _succ == Self.
652 // Try yet another round of adaptive spinning.
653 if (TrySpin(Self) > 0) break;
654
655 // We can find that we were unpark()ed and redesignated _succ while
656 // we were spinning. That's harmless. If we iterate and call park(),
657 // park() will consume the event and return immediately and we'll
739 // In the future we should reconcile EnterI() and ReenterI().
740
741 void ObjectMonitor::ReenterI(Thread * Self, ObjectWaiter * SelfNode) {
742 assert(Self != NULL, "invariant");
743 assert(SelfNode != NULL, "invariant");
744 assert(SelfNode->_thread == Self, "invariant");
745 assert(_waiters > 0, "invariant");
746 assert(((oop)(object()))->mark() == markOopDesc::encode(this), "invariant");
747 assert(((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant");
748 JavaThread * jt = (JavaThread *) Self;
749
750 int nWakeups = 0;
751 for (;;) {
752 ObjectWaiter::TStates v = SelfNode->TState;
753 guarantee(v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant");
754 assert(_owner != Self, "invariant");
755
756 if (TryLock(Self) > 0) break;
757 if (TrySpin(Self) > 0) break;
758
759 if (_owner == DEFLATER_MARKER) {
760 guarantee(0 <= _count, "Impossible: _owner == DEFLATER_MARKER && _count < 0, monitor must not be owned by deflater thread here");
761 if (Atomic::cmpxchg(Self, &_owner, DEFLATER_MARKER) == DEFLATER_MARKER) {
762 // Acquired the monitor.
763 break;
764 }
765 }
766
767 // State transition wrappers around park() ...
768 // ReenterI() wisely defers state transitions until
769 // it's clear we must park the thread.
770 {
771 OSThreadContendState osts(Self->osthread());
772 ThreadBlockInVM tbivm(jt);
773
774 // cleared by handle_special_suspend_equivalent_condition()
775 // or java_suspend_self()
776 jt->set_suspend_equivalent();
777 Self->_ParkEvent->park();
778
779 // were we externally suspended while we were waiting?
780 for (;;) {
781 if (!ExitSuspendEquivalent(jt)) break;
782 if (_succ == Self) { _succ = NULL; OrderAccess::fence(); }
783 jt->java_suspend_self();
784 jt->set_suspend_equivalent();
785 }
786 }
1204 assert(InitDone, "Unexpectedly not initialized");
1205
1206 if (THREAD != _owner) {
1207 if (THREAD->is_lock_owned ((address)_owner)) {
1208 assert(_recursions == 0, "internal state error");
1209 _owner = THREAD; // Convert from basiclock addr to Thread addr
1210 _recursions = 0;
1211 }
1212 }
1213
1214 guarantee(Self == _owner, "complete_exit not owner");
1215 intptr_t save = _recursions; // record the old recursion count
1216 _recursions = 0; // set the recursion level to be 0
1217 exit(true, Self); // exit the monitor
1218 guarantee(_owner != Self, "invariant");
1219 return save;
1220 }
1221
1222 // reenter() enters a lock and sets the recursion count.
1223 // complete_exit()/reenter() together operate like a wait() without the waiting.
1224 bool ObjectMonitor::reenter(intptr_t recursions, TRAPS) {
1225 Thread * const Self = THREAD;
1226 assert(Self->is_Java_thread(), "Must be Java thread!");
1227 JavaThread *jt = (JavaThread *)THREAD;
1228
1229 guarantee(_owner != Self, "reenter already owner");
1230 if (!enter(THREAD)) {
1231 // Failed to enter the monitor so return for a retry.
1232 return false;
1233 }
1234 // Entered the monitor.
1235 guarantee(_recursions == 0, "reenter recursion");
1236 _recursions = recursions;
1237 return true;
1238 }
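
// A condensed pairing sketch (assumed caller shape; the real retry loop is
// expected to live in ObjectSynchronizer): complete_exit() and reenter()
// bracket a region that must run without holding the monitor, preserving
// the recursion depth across the gap.
//
//   intptr_t save = monitor->complete_exit(THREAD);  // fully release
//   // ... code that must not hold the monitor ...
//   for (;;) {
//     ObjectMonitor* m = /* re-inflate from the object's mark word */;
//     if (m->reenter(save, THREAD)) break;  // false => deflated; retry
//   }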
1239
1240
1241 // -----------------------------------------------------------------------------
1242 // A macro is used below because there may already be a pending
1243 // exception which should not abort the execution of the routines
1244 // which use this (which is why we don't put this into check_slow and
1245 // call it with a CHECK argument).
1246
1247 #define CHECK_OWNER() \
1248 do { \
1249 if (THREAD != _owner) { \
1250 if (THREAD->is_lock_owned((address) _owner)) { \
1251 _owner = THREAD; /* Convert from basiclock addr to Thread addr */ \
1252 _recursions = 0; \
1253 } else { \
1254 THROW(vmSymbols::java_lang_IllegalMonitorStateException()); \
1255 } \
1256 } \
1257 } while (false)
1445 //
1446 // We redo the unpark() to ensure forward progress, i.e., we
1447 // don't want all pending threads hanging (parked) with none
1448 // entering the unlocked monitor.
1449 node._event->unpark();
1450 }
1451 }
1452
1453 if (event.should_commit()) {
1454 post_monitor_wait_event(&event, this, node._notifier_tid, millis, ret == OS_TIMEOUT);
1455 }
1456
1457 OrderAccess::fence();
1458
1459 assert(Self->_Stalled != 0, "invariant");
1460 Self->_Stalled = 0;
1461
1462 assert(_owner != Self, "invariant");
1463 ObjectWaiter::TStates v = node.TState;
1464 if (v == ObjectWaiter::TS_RUN) {
1465 const bool success = enter(Self);
1466 guarantee(success, "enter signaled for a retry, but monitor should not have been deflated as waiters > 0");
1467 } else {
1468 guarantee(v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant");
1469 ReenterI(Self, &node);
1470 node.wait_reenter_end(this);
1471 }
1472
1473 // Self has reacquired the lock.
1474 // Lifecycle - the node representing Self must not appear on any queues.
1475 // Node is about to go out-of-scope, but even if it were immortal we wouldn't
1476 // want residual elements associated with this thread left on any lists.
1477 guarantee(node.TState == ObjectWaiter::TS_RUN, "invariant");
1478 assert(_owner == Self, "invariant");
1479 assert(_succ != Self, "invariant");
1480 } // OSThreadWaitState()
1481
1482 jt->set_current_waiting_monitor(NULL);
1483
1484 guarantee(_recursions == 0, "invariant");
1485 _recursions = save; // restore the old recursion count
1486 _waiters--; // decrement the number of waiters
2008 n = PerfDataManager::create_counter(SUN_RT, #n, PerfData::U_Events, \
2009 CHECK); \
2010 }
2011 #define NEWPERFVARIABLE(n) \
2012 { \
2013 n = PerfDataManager::create_variable(SUN_RT, #n, PerfData::U_Events, \
2014 CHECK); \
2015 }
2016 NEWPERFCOUNTER(_sync_Inflations);
2017 NEWPERFCOUNTER(_sync_Deflations);
2018 NEWPERFCOUNTER(_sync_ContendedLockAttempts);
2019 NEWPERFCOUNTER(_sync_FutileWakeups);
2020 NEWPERFCOUNTER(_sync_Parks);
2021 NEWPERFCOUNTER(_sync_Notifications);
2022 NEWPERFVARIABLE(_sync_MonExtant);
2023 #undef NEWPERFCOUNTER
2024 #undef NEWPERFVARIABLE
2025 }
2026
2027 DEBUG_ONLY(InitDone = true;)
2028 }
2029
2030 // For internal use by ObjectSynchronizer::monitors_iterate().
2031 ObjectMonitorHandle::ObjectMonitorHandle(ObjectMonitor * om_ptr) {
2032 om_ptr->inc_ref_count();
2033 _om_ptr = om_ptr;
2034 }
2035
2036 ObjectMonitorHandle::~ObjectMonitorHandle() {
2037 if (_om_ptr != NULL) {
2038 _om_ptr->dec_ref_count();
2039 _om_ptr = NULL;
2040 }
2041 }
2042
2043 // Save the ObjectMonitor* associated with the specified markOop and
2044 // increment the ref_count. This function should only be called if
2045 // the caller has verified mark->has_monitor() == true. The object
2046 // parameter is needed to verify that ObjectMonitor* has not been
2047 // deflated and reused for another object.
2048 //
2049 // This function returns true if the ObjectMonitor* has been safely
2050 // saved. This function returns false if we have lost a race with
2051 // async deflation; the caller should retry as appropriate.
2052 //
2053 bool ObjectMonitorHandle::save_om_ptr(oop object, markOop mark) {
2054 guarantee(mark->has_monitor(), "sanity check: mark=" INTPTR_FORMAT,
2055 p2i((address)mark));
2056
2057 ObjectMonitor * om_ptr = mark->monitor();
2058 om_ptr->inc_ref_count();
2059
2060 if (AsyncDeflateIdleMonitors) {
2061 // Race here if the monitor is not owned! The ref_count bump above
2062 // makes subsequent async deflation skip this monitor, but a prior or
2063 // concurrent async deflation can still win the race.
2064 if (om_ptr->_owner == DEFLATER_MARKER) {
2065 // Async deflation won the race so we have to retry.
2066 om_ptr->dec_ref_count();
2067 return false;
2068 }
2069 // The ObjectMonitor could have been deflated and reused for
2070 // another object before we bumped the ref_count so make sure
2071 // our object still refers to this ObjectMonitor.
2072 const markOop tmp = object->mark();
2073 if (!tmp->has_monitor() || tmp->monitor() != om_ptr) {
2074 // Async deflation and reuse won the race so we have to retry.
2075 om_ptr->dec_ref_count();
2076 return false;
2077 }
2078 }
2079
2080 guarantee(_om_ptr == NULL, "sanity check: _om_ptr=" INTPTR_FORMAT,
2081 p2i(_om_ptr));
2082 _om_ptr = om_ptr;
2083 return true;
2084 }
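
// A condensed usage sketch (assumed caller shape, including an om_ptr()
// accessor): the handle turns "is this ObjectMonitor* still valid?" into a
// retry loop, since a false return means async deflation won the race.
//
//   ObjectMonitorHandle omh;
//   for (;;) {
//     markOop mark = obj->mark();
//     if (!mark->has_monitor()) break;        // inflate or handle other states
//     if (omh.save_om_ptr(obj, mark)) break;  // ref_count now pins the monitor
//     // Lost the race with async deflation; reread the mark and retry.
//   }
//   ObjectMonitor* monitor = omh.om_ptr();    // safe while omh is in scope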
2085
2086 // For internal use by ObjectSynchronizer::inflate().
2087 void ObjectMonitorHandle::set_om_ptr(ObjectMonitor * om_ptr) {
2088 // Cannot guarantee() is_new() here. As soon as the ObjectMonitor*
2089 // is attached to the object in inflate(), it can be used by other
2090 // JavaThreads.
2091 // guarantee(om_ptr->is_new(), "sanity check: allocation_state=%d",
2092 // int(om_ptr->allocation_state()));
2093 om_ptr->inc_ref_count();
2094 _om_ptr = om_ptr;
2095 }
|