283 "object mark must match encoded this: mark=" INTPTR_FORMAT
284 ", encoded this=" INTPTR_FORMAT, p2i(((oop)object())->mark()),
285 p2i(markOopDesc::encode(this)));
286 Self->_Stalled = 0;
287 return true;
288 }
289
290 assert(_owner != Self, "invariant");
291 assert(_succ != Self, "invariant");
292 assert(Self->is_Java_thread(), "invariant");
293 JavaThread * jt = (JavaThread *) Self;
294 assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
295 assert(jt->thread_state() != _thread_blocked, "invariant");
296 assert(AsyncDeflateIdleMonitors || this->object() != NULL, "invariant");
297 assert(AsyncDeflateIdleMonitors || _contentions >= 0, "invariant");
298
299 // Prevent deflation. See ObjectSynchronizer::deflate_monitor() and is_busy().
300 // Ensure the object-monitor relationship remains stable while there's contention.
301 const jint contentions = Atomic::add(1, &_contentions);
302 if (contentions <= 0 && _owner == DEFLATER_MARKER) {
303 // Async deflation is in progress. Help the deflater thread install
304 // the mark word (in case the deflater thread is slow).
305 install_displaced_markword_in_object();
306 Self->_Stalled = 0;
307 return false; // Caller should retry. Never mind about _contentions as this monitor has been deflated.
308 }
309 // The deflater thread will not deflate this monitor and the monitor is contended, so continue.
310
311 JFR_ONLY(JfrConditionalFlushWithStacktrace<EventJavaMonitorEnter> flush(jt);)
312 EventJavaMonitorEnter event;
313 if (event.should_commit()) {
314 event.set_monitorClass(((oop)this->object())->klass());
315 event.set_address((uintptr_t)(this->object_addr()));
316 }
317
318 { // Change java thread status to indicate blocked on monitor enter.
319 JavaThreadBlockedOnMonitorEnterState jtbmes(jt, this);
320
321 Self->set_current_pending_monitor(this);
322
323 DTRACE_MONITOR_PROBE(contended__enter, this, object(), jt);
324 if (JvmtiExport::should_post_monitor_contended_enter()) {
325 JvmtiExport::post_monitor_contended_enter(jt, this);
409
410 // Caveat: TryLock() is not necessarily serializing if it returns failure.
411 // Callers must compensate as needed.
412
413 int ObjectMonitor::TryLock(Thread * Self) {
414 void * own = _owner;
415 if (own != NULL) return 0;
416 if (Atomic::replace_if_null(Self, &_owner)) {
417 // Either guarantee _recursions == 0 or set _recursions = 0.
418 assert(_recursions == 0, "invariant");
419 assert(_owner == Self, "invariant");
420 return 1;
421 }
422 // The lock had been free momentarily, but we lost the race to the lock.
423 // Interference -- the CAS failed.
424 // We can either return -1 or retry.
425 // Retry doesn't make as much sense because the lock was just acquired.
426 return -1;
427 }
428
429 // Install the displaced markword of a deflated monitor into the object
430 // associated with the monitor.
431 // This method is idempotent and is executed by both mutators wanting to
432 // acquire a monitor for an object and the thread deflating monitors.
433 // A mutator trying to install a hash in the monitor's _header field can
434 // also run in parallel to this method.
435 void ObjectMonitor::install_displaced_markword_in_object() {
436 markOop dmw = header();
437 if (dmw == NULL) {
438 // The thread deflating monitors has won the race so we
439 // have nothing to do.
440 return;
441 }
442
443 // A non-NULL dmw has to be either neutral or is participating in
444 // this restoration protocol.
445 assert(dmw->is_neutral() || (dmw->is_marked() && dmw->hash() == 0),
446 "failed precondition: is_neutral=%d, is_marked=%d, hash="
447 INTPTR_FORMAT, dmw->is_neutral(), dmw->is_marked(), dmw->hash());
448
449 if (!dmw->is_marked() && dmw->hash() == 0) {
450 // This dmw is neutral and has not yet started the restoration
451 // protocol so we mark a copy of the dmw to begin the protocol.
452 markOop marked_dmw = dmw->set_marked();
453 assert(marked_dmw->is_marked() && marked_dmw->hash() == 0,
454 "sanity_check: is_marked=%d, hash=" INTPTR_FORMAT,
455 marked_dmw->is_marked(), marked_dmw->hash());
456
457 // There can be three different racers trying to update the _header
458 // field, and the returned dmw value tells us what cleanup needs
459 // to be done (if any) once the race has a winner:
460 // 1) A mutator trying to install a hash in the object.
461 // Note: That mutator is not executing this code, but it is
462 // trying to update the _header field.
463 // If winner: dmw will contain the hash and be unmarked
464 // 2a) A mutator trying to acquire the monitor via enter():
465 // If winner: dmw is marked and hash() == 0
466 // 2b) The thread deflating the monitor via deflate_monitor_using_JT():
467 // If winner: dmw is marked and hash() == 0
468 dmw = (markOop) Atomic::cmpxchg(marked_dmw, &_header, dmw);
469 }
470
471 if (dmw->is_marked()) {
472 // The dmw copy is marked which means a hash was not set by a racing
473 // thread. Clear the mark from the copy in preparation for possible
474 // restoration from this thread.
475 assert(dmw->hash() == 0, "must be 0: hash=" INTPTR_FORMAT, dmw->hash());
476 dmw = dmw->set_unmarked();
477 }
478 assert(dmw->is_neutral(), "must be a neutral markword");
479
480 oop const obj = (oop) object();
481 // Install displaced markword if object markword still points to this
482 // monitor. Both the mutator trying to enter() and the thread deflating
483 // the monitor will reach this point, but only one can win.
484 // Note: If a mutator won the cmpxchg() race above and installed a hash
485 // in _header, then the updated dmw contains that hash and we'll install
486 // it in the object's markword here.
487 obj->cas_set_mark(dmw, markOopDesc::encode(this));
488 }
489
490 #define MAX_RECHECK_INTERVAL 1000
491
492 void ObjectMonitor::EnterI(TRAPS) {
493 Thread * const Self = THREAD;
494 assert(Self->is_Java_thread(), "invariant");
495 assert(((JavaThread *) Self)->thread_state() == _thread_blocked, "invariant");
496
497 // Try the lock - TATAS
498 if (TryLock (Self) > 0) {
499 assert(_succ != Self, "invariant");
500 assert(_owner == Self, "invariant");
501 assert(_Responsible != Self, "invariant");
502 return;
503 }
504
505 if (_owner == DEFLATER_MARKER) {
506 guarantee(0 < _contentions, "_owner == DEFLATER_MARKER && _contentions <= 0 should have been handled by the caller");
507 // The deflater thread tried to lock this monitor, but it failed to make _contentions negative and gave up.
508 // Try to acquire monitor.
509 if (Atomic::cmpxchg(Self, &_owner, DEFLATER_MARKER) == DEFLATER_MARKER) {
510 assert(_succ != Self, "invariant");
511 assert(_owner == Self, "invariant");
512 assert(_Responsible != Self, "invariant");
513 return;
514 }
515 }
516
517 assert(InitDone, "Unexpectedly not initialized");
518
519 // We try one round of spinning *before* enqueueing Self.
520 //
521 // If the _owner is ready but OFFPROC we could use a YieldTo()
522 // operation to donate the remainder of this thread's quantum
523 // to the owner. This has subtle but beneficial affinity
524 // effects.
525
526 if (TrySpin(Self) > 0) {
527 assert(_owner == Self, "invariant");
528 assert(_succ != Self, "invariant");
529 assert(_Responsible != Self, "invariant");
530 return;
531 }
614 for (;;) {
615
616 if (TryLock(Self) > 0) break;
617 assert(_owner != Self, "invariant");
618
619 // park self
620 if (_Responsible == Self) {
621 Self->_ParkEvent->park((jlong) recheckInterval);
622 // Increase the recheckInterval, but clamp the value.
623 recheckInterval *= 8;
624 if (recheckInterval > MAX_RECHECK_INTERVAL) {
625 recheckInterval = MAX_RECHECK_INTERVAL;
626 }
627 } else {
628 Self->_ParkEvent->park();
629 }
630
631 if (TryLock(Self) > 0) break;
632
633 if (_owner == DEFLATER_MARKER) {
634 guarantee(0 < _contentions, "_owner == DEFLATER_MARKER && _contentions <= 0 should have been handled by the caller");
635 // The deflater thread tried to lock this monitor, but it failed to make _contentions negative and gave up.
636 if (Atomic::cmpxchg(Self, &_owner, DEFLATER_MARKER) == DEFLATER_MARKER) {
637 // Acquired the monitor.
638 break;
639 }
640 }
641
642 // The lock is still contested.
643 // Keep a tally of the # of futile wakeups.
644 // Note that the counter is not protected by a lock or updated by atomics.
645 // That is by design - we trade "lossy" counters, which are exposed to
646 // races during updates, for a lower probe effect.
647
648 // This PerfData object can be used in parallel with a safepoint.
649 // See the workaround in PerfDataManager::destroy().
650 OM_PERFDATA_OP(FutileWakeups, inc());
651 ++nWakeups;
652
653 // Assuming this is not a spurious wakeup we'll normally find _succ == Self.
654 // We can defer clearing _succ until after the spin completes
655 // TrySpin() must tolerate being called with _succ == Self.
744
745 void ObjectMonitor::ReenterI(Thread * Self, ObjectWaiter * SelfNode) {
746 assert(Self != NULL, "invariant");
747 assert(SelfNode != NULL, "invariant");
748 assert(SelfNode->_thread == Self, "invariant");
749 assert(_waiters > 0, "invariant");
750 assert(((oop)(object()))->mark() == markOopDesc::encode(this), "invariant");
751 assert(((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant");
752 JavaThread * jt = (JavaThread *) Self;
753
754 int nWakeups = 0;
755 for (;;) {
756 ObjectWaiter::TStates v = SelfNode->TState;
757 guarantee(v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant");
758 assert(_owner != Self, "invariant");
759
760 if (TryLock(Self) > 0) break;
761 if (TrySpin(Self) > 0) break;
762
763 if (_owner == DEFLATER_MARKER) {
764 guarantee(0 <= _contentions, "Impossible: _owner == DEFLATER_MARKER && _contentions < 0, monitor must not be owned by deflater thread here");
765 if (Atomic::cmpxchg(Self, &_owner, DEFLATER_MARKER) == DEFLATER_MARKER) {
766 // Acquired the monitor.
767 break;
768 }
769 }
770
771 // State transition wrappers around park() ...
772 // ReenterI() wisely defers state transitions until
773 // it's clear we must park the thread.
774 {
775 OSThreadContendState osts(Self->osthread());
776 ThreadBlockInVM tbivm(jt);
777
778 // cleared by handle_special_suspend_equivalent_condition()
779 // or java_suspend_self()
780 jt->set_suspend_equivalent();
781 Self->_ParkEvent->park();
782
783 // were we externally suspended while we were waiting?
784 for (;;) {
958 Thread * const Self = THREAD;
959 if (THREAD != _owner) {
960 if (THREAD->is_lock_owned((address) _owner)) {
961 // Transmute _owner from a BasicLock pointer to a Thread address.
962 // We don't need to hold _mutex for this transition.
963 // Non-null to Non-null is safe as long as all readers can
964 // tolerate either flavor.
965 assert(_recursions == 0, "invariant");
966 _owner = THREAD;
967 _recursions = 0;
968 } else {
969 // Apparent unbalanced locking ...
970 // Naively we'd like to throw IllegalMonitorStateException.
971 // As a practical matter we can neither allocate nor throw an
972 // exception as ::exit() can be called from leaf routines.
973 // see x86_32.ad Fast_Unlock() and the I1 and I2 properties.
974 // Upon deeper reflection, however, in a properly run JVM the only
975 // way we should encounter this situation is in the presence of
976 // unbalanced JNI locking. TODO: CheckJNICalls.
977 // See also: CR4414101
978 assert(false, "Non-balanced monitor enter/exit! Likely JNI locking");
979 return;
980 }
981 }
982
983 if (_recursions != 0) {
984 _recursions--; // this is simple recursive enter
985 return;
986 }
987
988 // Invariant: after setting Responsible=null a thread must execute
989 // a MEMBAR or other serializing instruction before fetching EntryList|cxq.
990 _Responsible = NULL;
991
992 #if INCLUDE_JFR
993 // get the owner's thread id for the MonitorEnter event
994 // if it is enabled and the thread isn't suspended
995 if (not_suspended && EventJavaMonitorEnter::is_enabled()) {
996 _previous_owner_tid = JFR_THREAD_ID(Self);
997 }
998 #endif
1450 // We redo the unpark() to ensure forward progress, i.e., we
1451 // don't want all pending threads hanging (parked) with none
1452 // entering the unlocked monitor.
1453 node._event->unpark();
1454 }
1455 }
1456
1457 if (event.should_commit()) {
1458 post_monitor_wait_event(&event, this, node._notifier_tid, millis, ret == OS_TIMEOUT);
1459 }
1460
1461 OrderAccess::fence();
1462
1463 assert(Self->_Stalled != 0, "invariant");
1464 Self->_Stalled = 0;
1465
1466 assert(_owner != Self, "invariant");
1467 ObjectWaiter::TStates v = node.TState;
1468 if (v == ObjectWaiter::TS_RUN) {
1469 const bool success = enter(Self);
1470 guarantee(success, "enter signaled for a retry, but monitor should not have been deflated as waiters > 0");
1471 } else {
1472 guarantee(v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant");
1473 ReenterI(Self, &node);
1474 node.wait_reenter_end(this);
1475 }
1476
1477 // Self has reacquired the lock.
1478 // Lifecycle - the node representing Self must not appear on any queues.
1479 // Node is about to go out-of-scope, but even if it were immortal we wouldn't
1480 // want residual elements associated with this thread left on any lists.
1481 guarantee(node.TState == ObjectWaiter::TS_RUN, "invariant");
1482 assert(_owner == Self, "invariant");
1483 assert(_succ != Self, "invariant");
1484 } // OSThreadWaitState()
1485
1486 jt->set_current_waiting_monitor(NULL);
1487
1488 guarantee(_recursions == 0, "invariant");
1489 _recursions = save; // restore the old recursion count
1490 _waiters--; // decrement the number of waiters
2039
2040 ObjectMonitorHandle::~ObjectMonitorHandle() {
2041 if (_om_ptr != NULL) {
2042 _om_ptr->dec_ref_count();
2043 _om_ptr = NULL;
2044 }
2045 }
2046
2047 // Save the ObjectMonitor* associated with the specified markOop and
2048 // increment the ref_count. This function should only be called if
2049 // the caller has verified mark->has_monitor() == true. The object
2050 // parameter is needed to verify that the ObjectMonitor* has not been
2051 // deflated and reused for another object.
2052 //
2053 // This function returns true if the ObjectMonitor* has been safely
2054 // saved. This function returns false if we have lost a race with
2055 // async deflation; the caller should retry as appropriate.
2056 //
2057 bool ObjectMonitorHandle::save_om_ptr(oop object, markOop mark) {
2058 guarantee(mark->has_monitor(), "sanity check: mark=" INTPTR_FORMAT,
2059 p2i((address)mark));
2060
2061 ObjectMonitor * om_ptr = mark->monitor();
2062 om_ptr->inc_ref_count();
2063
2064 if (AsyncDeflateIdleMonitors) {
2065 // Race here if the monitor is not owned! The ref_count bump above
2066 // causes any subsequent async deflation to skip this monitor.
2067 // However, a previous or concurrent async deflation can still race with us.
2068 if (om_ptr->_owner == DEFLATER_MARKER) {
2069 // Async deflation won the race so we have to retry.
2070 om_ptr->dec_ref_count();
2071 return false;
2072 }
2073 // The ObjectMonitor could have been deflated and reused for
2074 // another object before we bumped the ref_count so make sure
2075 // our object still refers to this ObjectMonitor.
2076 const markOop tmp = object->mark();
2077 if (!tmp->has_monitor() || tmp->monitor() != om_ptr) {
2078 // Async deflation and reuse won the race so we have to retry.
2079 om_ptr->dec_ref_count();
2080 return false;
2081 }
2082 }
2083
2084 guarantee(_om_ptr == NULL, "sanity check: _om_ptr=" INTPTR_FORMAT,
2085 p2i(_om_ptr));
2086 _om_ptr = om_ptr;
2087 return true;
2088 }
2089
2090 // For internal use by ObjectSynchronizer::inflate().
2091 void ObjectMonitorHandle::set_om_ptr(ObjectMonitor * om_ptr) {
2092 // Cannot guarantee() is_new() here. As soon as the ObjectMonitor*
2093 // is attached to the object in inflate(), it can be used by other
2094 // JavaThreads.
2095 // guarantee(om_ptr->is_new(), "sanity check: allocation_state=%d",
2096 // int(om_ptr->allocation_state()));
2097 om_ptr->inc_ref_count();
2098 _om_ptr = om_ptr;
2099 }
|
283 "object mark must match encoded this: mark=" INTPTR_FORMAT
284 ", encoded this=" INTPTR_FORMAT, p2i(((oop)object())->mark()),
285 p2i(markOopDesc::encode(this)));
286 Self->_Stalled = 0;
287 return true;
288 }
289
290 assert(_owner != Self, "invariant");
291 assert(_succ != Self, "invariant");
292 assert(Self->is_Java_thread(), "invariant");
293 JavaThread * jt = (JavaThread *) Self;
294 assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
295 assert(jt->thread_state() != _thread_blocked, "invariant");
296 assert(AsyncDeflateIdleMonitors || this->object() != NULL, "invariant");
297 assert(AsyncDeflateIdleMonitors || _contentions >= 0, "invariant");
298
299 // Prevent deflation. See ObjectSynchronizer::deflate_monitor() and is_busy().
300 // Ensure the object-monitor relationship remains stable while there's contention.
301 const jint contentions = Atomic::add(1, &_contentions);
302 if (contentions <= 0 && _owner == DEFLATER_MARKER) {
303 // Async deflation is in progress. Attempt to restore the
304 // header/dmw to the object's header so that we only retry once
305 // if the deflater thread happens to be slow.
306 const oop obj = (oop) object();
307 install_displaced_markword_in_object(obj);
308 Self->_Stalled = 0;
309 return false; // Caller should retry. Never mind about _contentions as this monitor has been deflated.
310 }
311 // The deflater thread will not deflate this monitor and the monitor is contended, so continue.
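// Note on ordering: the _contentions increment above happens *before* the
// _owner == DEFLATER_MARKER check, which is the mirror image of the deflater
// side (deflate_monitor_using_JT() claims _owner first and then tries to
// drive _contentions negative). Seeing contentions <= 0 together with
// _owner == DEFLATER_MARKER is therefore a reliable sign that async deflation
// has already claimed this monitor, which is why the code above bails out and
// lets the caller retry.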
312
313 JFR_ONLY(JfrConditionalFlushWithStacktrace<EventJavaMonitorEnter> flush(jt);)
314 EventJavaMonitorEnter event;
315 if (event.should_commit()) {
316 event.set_monitorClass(((oop)this->object())->klass());
317 event.set_address((uintptr_t)(this->object_addr()));
318 }
319
320 { // Change java thread status to indicate blocked on monitor enter.
321 JavaThreadBlockedOnMonitorEnterState jtbmes(jt, this);
322
323 Self->set_current_pending_monitor(this);
324
325 DTRACE_MONITOR_PROBE(contended__enter, this, object(), jt);
326 if (JvmtiExport::should_post_monitor_contended_enter()) {
327 JvmtiExport::post_monitor_contended_enter(jt, this);
411
412 // Caveat: TryLock() is not necessarily serializing if it returns failure.
413 // Callers must compensate as needed.
414
415 int ObjectMonitor::TryLock(Thread * Self) {
416 void * own = _owner;
417 if (own != NULL) return 0;
418 if (Atomic::replace_if_null(Self, &_owner)) {
419 // Either guarantee _recursions == 0 or set _recursions = 0.
420 assert(_recursions == 0, "invariant");
421 assert(_owner == Self, "invariant");
422 return 1;
423 }
424 // The lock had been free momentarily, but we lost the race to the lock.
425 // Interference -- the CAS failed.
426 // We can either return -1 or retry.
427 // Retry doesn't make as much sense because the lock was just acquired.
428 return -1;
429 }
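// Return convention of TryLock() as implemented above:
//   1 -> the CAS installed Self in _owner (lock acquired)
//   0 -> _owner was already non-NULL (lock busy)
//  -1 -> _owner appeared NULL but the CAS lost the race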
430
431 // Install the displaced mark word (dmw) of a deflating ObjectMonitor
432 // into the header of the object associated with the monitor. This
433 // idempotent method is called by a thread that is deflating a
434 // monitor and by other threads that have detected a race with the
435 // deflation process.
436 void ObjectMonitor::install_displaced_markword_in_object(const oop obj) {
437 // This function must only be called when (owner == DEFLATER_MARKER
438 // && contentions <= 0), but we can't guarantee that here because
439 // those values could change when the ObjectMonitor gets moved from
440 // the global free list to a per-thread free list.
441
442 guarantee(obj != NULL, "must be non-NULL");
443 if (object() != obj) {
444 // ObjectMonitor's object ref no longer refers to the target object
445 // so the object's header has already been restored.
446 return;
447 }
448
449 markOop dmw = header();
450 if (dmw == NULL) {
451 // ObjectMonitor's header/dmw has been cleared by the deflating
452 // thread so the object's header has already been restored.
453 return;
454 }
455
456 // A non-NULL dmw has to be either neutral (not locked and not marked)
457 // or is already participating in this restoration protocol.
458 assert(dmw->is_neutral() || (dmw->is_marked() && dmw->hash() == 0),
459 "failed precondition: dmw=" INTPTR_FORMAT, p2i(dmw));
460
461 markOop marked_dmw = NULL;
462 if (!dmw->is_marked() && dmw->hash() == 0) {
463 // This dmw has not yet started the restoration protocol so we
464 // mark a copy of the dmw to begin the protocol.
465 // Note: A dmw with a hashcode does not take this code path.
466 marked_dmw = dmw->set_marked();
467
468 // All of the callers to this function can be racing with each
469 // other trying to update the _header field.
470 dmw = (markOop) Atomic::cmpxchg(marked_dmw, &_header, dmw);
471 if (dmw == NULL) {
472 // ObjectMonitor's header/dmw has been cleared by the deflating
473 // thread so the object's header has already been restored.
474 return;
475 }
476 // The _header field is now marked. The winner's 'dmw' variable
477 // contains the original, unmarked header/dmw value and any
478 // losers have a marked header/dmw value that will be cleaned
479 // up below.
480 }
481
482 if (dmw->is_marked()) {
483 // Clear the mark from the header/dmw copy in preparation for
484 // possible restoration from this thread.
485 assert(dmw->hash() == 0, "hashcode must be 0: dmw=" INTPTR_FORMAT,
486 p2i(dmw));
487 dmw = dmw->set_unmarked();
488 }
489 assert(dmw->is_neutral(), "must be neutral: dmw=" INTPTR_FORMAT, p2i(dmw));
490
491 // Install displaced mark word if the object's header still points
492 // to this ObjectMonitor. All racing callers to this function will
493 // reach this point, but only one can win.
494 obj->cas_set_mark(dmw, markOopDesc::encode(this));
495
496 // Note: It does not matter which thread restored the header/dmw
497 // into the object's header. The thread deflating the monitor just
498 // wanted the object's header restored, and now it is. The threads
499 // that detected a race with the deflation process also wanted the
500 // object's header restored before retrying their operation, and
501 // because it has been restored they will only retry once.
502
503 if (marked_dmw != NULL) {
504 // Clear _header to NULL if it is still marked_dmw so a racing
505 // install_displaced_markword_in_object() can bail out sooner.
506 Atomic::cmpxchg((markOop)NULL, &_header, marked_dmw);
507 }
508 }
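// In short, the restoration protocol above proceeds in three steps:
//   1. The monitor's _header goes from a neutral dmw to a marked dmw via
//      cmpxchg (skipped entirely when the dmw already carries a hashcode).
//   2. The neutral dmw is copied back into the object's header with
//      cas_set_mark(), but only if the object still points at this monitor.
//   3. Racers that attempted the marking try to reset _header to NULL so
//      that later callers can bail out earlier.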
509
510 #define MAX_RECHECK_INTERVAL 1000
511
512 void ObjectMonitor::EnterI(TRAPS) {
513 Thread * const Self = THREAD;
514 assert(Self->is_Java_thread(), "invariant");
515 assert(((JavaThread *) Self)->thread_state() == _thread_blocked, "invariant");
516
517 // Try the lock - TATAS
518 if (TryLock (Self) > 0) {
519 assert(_succ != Self, "invariant");
520 assert(_owner == Self, "invariant");
521 assert(_Responsible != Self, "invariant");
522 return;
523 }
524
525 if (_owner == DEFLATER_MARKER) {
526 // The deflation protocol finished the first part (setting _owner), but
527 // it failed the second part (making _contentions negative) and bailed.
528 // Because we're called from enter() we have at least one contention.
529 guarantee(_contentions > 0, "owner == DEFLATER_MARKER && contentions <= 0 "
530 "should have been handled by the caller: contentions=%d",
531 _contentions);
532 if (Atomic::cmpxchg(Self, &_owner, DEFLATER_MARKER) == DEFLATER_MARKER) {
533 // Acquired the monitor.
534 assert(_succ != Self, "invariant");
535 assert(_Responsible != Self, "invariant");
536 return;
537 }
538 }
539
540 assert(InitDone, "Unexpectedly not initialized");
541
542 // We try one round of spinning *before* enqueueing Self.
543 //
544 // If the _owner is ready but OFFPROC we could use a YieldTo()
545 // operation to donate the remainder of this thread's quantum
546 // to the owner. This has subtle but beneficial affinity
547 // effects.
548
549 if (TrySpin(Self) > 0) {
550 assert(_owner == Self, "invariant");
551 assert(_succ != Self, "invariant");
552 assert(_Responsible != Self, "invariant");
553 return;
554 }
637 for (;;) {
638
639 if (TryLock(Self) > 0) break;
640 assert(_owner != Self, "invariant");
641
642 // park self
643 if (_Responsible == Self) {
644 Self->_ParkEvent->park((jlong) recheckInterval);
645 // Increase the recheckInterval, but clamp the value.
646 recheckInterval *= 8;
647 if (recheckInterval > MAX_RECHECK_INTERVAL) {
648 recheckInterval = MAX_RECHECK_INTERVAL;
649 }
650 } else {
651 Self->_ParkEvent->park();
652 }
653
654 if (TryLock(Self) > 0) break;
655
656 if (_owner == DEFLATER_MARKER) {
657 // The deflation protocol finished the first part (setting _owner), but
658 // it failed the second part (making _contentions negative) and bailed.
659 // Because we're called from enter() we have at least one contention.
660 guarantee(_contentions > 0, "owner == DEFLATER_MARKER && contentions <= 0 "
661 "should have been handled by the caller: contentions=%d",
662 _contentions);
663 if (Atomic::cmpxchg(Self, &_owner, DEFLATER_MARKER) == DEFLATER_MARKER) {
664 // Acquired the monitor.
665 break;
666 }
667 }
668
669 // The lock is still contested.
670 // Keep a tally of the # of futile wakeups.
671 // Note that the counter is not protected by a lock or updated by atomics.
672 // That is by design - we trade "lossy" counters, which are exposed to
673 // races during updates, for a lower probe effect.
674
675 // This PerfData object can be used in parallel with a safepoint.
676 // See the workaround in PerfDataManager::destroy().
677 OM_PERFDATA_OP(FutileWakeups, inc());
678 ++nWakeups;
679
680 // Assuming this is not a spurious wakeup we'll normally find _succ == Self.
681 // We can defer clearing _succ until after the spin completes
682 // TrySpin() must tolerate being called with _succ == Self.
771
772 void ObjectMonitor::ReenterI(Thread * Self, ObjectWaiter * SelfNode) {
773 assert(Self != NULL, "invariant");
774 assert(SelfNode != NULL, "invariant");
775 assert(SelfNode->_thread == Self, "invariant");
776 assert(_waiters > 0, "invariant");
777 assert(((oop)(object()))->mark() == markOopDesc::encode(this), "invariant");
778 assert(((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant");
779 JavaThread * jt = (JavaThread *) Self;
780
781 int nWakeups = 0;
782 for (;;) {
783 ObjectWaiter::TStates v = SelfNode->TState;
784 guarantee(v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant");
785 assert(_owner != Self, "invariant");
786
787 if (TryLock(Self) > 0) break;
788 if (TrySpin(Self) > 0) break;
789
790 if (_owner == DEFLATER_MARKER) {
791 // The deflation protocol finished the first part (setting _owner),
792 // but the deflater will observe _waiters != 0 and bail out. Because
793 // we're called from wait() we may or may not have any contentions.
794 guarantee(_contentions >= 0, "owner == DEFLATER_MARKER && contentions < 0 "
795 "should have been handled by the caller: contentions=%d",
796 _contentions);
797 if (Atomic::cmpxchg(Self, &_owner, DEFLATER_MARKER) == DEFLATER_MARKER) {
798 // Acquired the monitor.
799 break;
800 }
801 }
802
803 // State transition wrappers around park() ...
804 // ReenterI() wisely defers state transitions until
805 // it's clear we must park the thread.
806 {
807 OSThreadContendState osts(Self->osthread());
808 ThreadBlockInVM tbivm(jt);
809
810 // cleared by handle_special_suspend_equivalent_condition()
811 // or java_suspend_self()
812 jt->set_suspend_equivalent();
813 Self->_ParkEvent->park();
814
815 // were we externally suspended while we were waiting?
816 for (;;) {
990 Thread * const Self = THREAD;
991 if (THREAD != _owner) {
992 if (THREAD->is_lock_owned((address) _owner)) {
993 // Transmute _owner from a BasicLock pointer to a Thread address.
994 // We don't need to hold _mutex for this transition.
995 // Non-null to Non-null is safe as long as all readers can
996 // tolerate either flavor.
997 assert(_recursions == 0, "invariant");
998 _owner = THREAD;
999 _recursions = 0;
1000 } else {
1001 // Apparent unbalanced locking ...
1002 // Naively we'd like to throw IllegalMonitorStateException.
1003 // As a practical matter we can neither allocate nor throw an
1004 // exception as ::exit() can be called from leaf routines.
1005 // see x86_32.ad Fast_Unlock() and the I1 and I2 properties.
1006 // Upon deeper reflection, however, in a properly run JVM the only
1007 // way we should encounter this situation is in the presence of
1008 // unbalanced JNI locking. TODO: CheckJNICalls.
1009 // See also: CR4414101
1010 assert(false, "Non-balanced monitor enter/exit! Likely JNI locking: "
1011 "owner=" INTPTR_FORMAT, p2i(_owner));
1012 return;
1013 }
1014 }
1015
1016 if (_recursions != 0) {
1017 _recursions--; // this is simple recursive enter
1018 return;
1019 }
1020
1021 // Invariant: after setting Responsible=null a thread must execute
1022 // a MEMBAR or other serializing instruction before fetching EntryList|cxq.
1023 _Responsible = NULL;
1024
1025 #if INCLUDE_JFR
1026 // get the owner's thread id for the MonitorEnter event
1027 // if it is enabled and the thread isn't suspended
1028 if (not_suspended && EventJavaMonitorEnter::is_enabled()) {
1029 _previous_owner_tid = JFR_THREAD_ID(Self);
1030 }
1031 #endif
1483 // We redo the unpark() to ensure forward progress, i.e., we
1484 // don't want all pending threads hanging (parked) with none
1485 // entering the unlocked monitor.
1486 node._event->unpark();
1487 }
1488 }
1489
1490 if (event.should_commit()) {
1491 post_monitor_wait_event(&event, this, node._notifier_tid, millis, ret == OS_TIMEOUT);
1492 }
1493
1494 OrderAccess::fence();
1495
1496 assert(Self->_Stalled != 0, "invariant");
1497 Self->_Stalled = 0;
1498
1499 assert(_owner != Self, "invariant");
1500 ObjectWaiter::TStates v = node.TState;
1501 if (v == ObjectWaiter::TS_RUN) {
1502 const bool success = enter(Self);
1503 ADIM_guarantee(success, "enter signaled for a retry, but monitor should not have been deflated as waiters > 0");
1504 } else {
1505 guarantee(v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant");
1506 ReenterI(Self, &node);
1507 node.wait_reenter_end(this);
1508 }
1509
1510 // Self has reacquired the lock.
1511 // Lifecycle - the node representing Self must not appear on any queues.
1512 // Node is about to go out-of-scope, but even if it were immortal we wouldn't
1513 // want residual elements associated with this thread left on any lists.
1514 guarantee(node.TState == ObjectWaiter::TS_RUN, "invariant");
1515 assert(_owner == Self, "invariant");
1516 assert(_succ != Self, "invariant");
1517 } // OSThreadWaitState()
1518
1519 jt->set_current_waiting_monitor(NULL);
1520
1521 guarantee(_recursions == 0, "invariant");
1522 _recursions = save; // restore the old recursion count
1523 _waiters--; // decrement the number of waiters
2072
2073 ObjectMonitorHandle::~ObjectMonitorHandle() {
2074 if (_om_ptr != NULL) {
2075 _om_ptr->dec_ref_count();
2076 _om_ptr = NULL;
2077 }
2078 }
2079
2080 // Save the ObjectMonitor* associated with the specified markOop and
2081 // increment the ref_count. This function should only be called if
2082 // the caller has verified mark->has_monitor() == true. The object
2083 // parameter is needed to verify that the ObjectMonitor* has not been
2084 // deflated and reused for another object.
2085 //
2086 // This function returns true if the ObjectMonitor* has been safely
2087 // saved. This function returns false if we have lost a race with
2088 // async deflation; the caller should retry as appropriate.
2089 //
2090 bool ObjectMonitorHandle::save_om_ptr(oop object, markOop mark) {
2091 guarantee(mark->has_monitor(), "sanity check: mark=" INTPTR_FORMAT,
2092 p2i(mark));
2093
2094 ObjectMonitor * om_ptr = mark->monitor();
2095 om_ptr->inc_ref_count();
2096
2097 if (AsyncDeflateIdleMonitors) {
2098 // Race here if the monitor is not owned! The ref_count bump above
2099 // causes any subsequent async deflation to skip this monitor.
2100 // However, a previous or concurrent async deflation can still race with us.
2101 if (om_ptr->_owner == DEFLATER_MARKER && om_ptr->_contentions <= 0) {
2102 // Async deflation is in progress. Attempt to restore the
2103 // header/dmw to the object's header so that we only retry once
2104 // if the deflater thread happens to be slow.
2105 om_ptr->install_displaced_markword_in_object(object);
2106 om_ptr->dec_ref_count();
2107 return false;
2108 }
2109 // The ObjectMonitor could have been deflated and reused for
2110 // another object before we bumped the ref_count so make sure
2111 // our object still refers to this ObjectMonitor.
2112 const markOop tmp = object->mark();
2113 if (!tmp->has_monitor() || tmp->monitor() != om_ptr) {
2114 // Async deflation and reuse won the race so we have to retry.
2115 // Skip object header restoration since that's already done.
2116 om_ptr->dec_ref_count();
2117 return false;
2118 }
2119 }
2120
2121 guarantee(_om_ptr == NULL, "sanity check: _om_ptr=" INTPTR_FORMAT,
2122 p2i(_om_ptr));
2123 _om_ptr = om_ptr;
2124 return true;
2125 }
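// A caller typically retries save_om_ptr() until the monitor is pinned.
// Roughly (simplified sketch; the real callers live in ObjectSynchronizer
// and also handle inflation of a non-inflated mark):
//
//   ObjectMonitorHandle omh;
//   for (;;) {
//     markOop mark = obj->mark();
//     if (!mark->has_monitor()) {
//       ... inflate or handle the non-inflated cases ...
//       continue;
//     }
//     if (omh.save_om_ptr(obj, mark)) {
//       break;  // ref_count is bumped; async deflation will skip this monitor
//     }
//     // Lost the race with async deflation; retry with a fresh read of the mark.
//   }
//   // ... use the saved ObjectMonitor* ...
//   // The handle's destructor drops the ref_count when omh goes out of scope.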
2126
2127 // For internal use by ObjectSynchronizer::inflate().
2128 void ObjectMonitorHandle::set_om_ptr(ObjectMonitor * om_ptr) {
2129 if (_om_ptr == NULL) {
2130 guarantee(om_ptr != NULL, "cannot clear an unset om_ptr");
2131 om_ptr->inc_ref_count();
2132 _om_ptr = om_ptr;
2133 } else {
2134 guarantee(om_ptr == NULL, "can only clear a set om_ptr");
2135 _om_ptr->dec_ref_count();
2136 _om_ptr = NULL;
2137 }
2138 }
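// With this shape, set_om_ptr(om) pins a monitor for a handle that is not
// yet tracking one, and set_om_ptr(NULL) releases the currently pinned
// monitor, keeping the ref_count inc/dec balanced across inflate().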
|