222 // * See also http://blogs.sun.com/dave
223
224
// ObjectMonitors are VM-internal objects: allocate from the C heap under
// the mtInternal memory category, never from the Java heap.
// NOTE(review): AllocateHeap presumably aborts rather than returning NULL
// on exhaustion despite the throw() spec — confirm against allocation.hpp.
225 void* ObjectMonitor::operator new (size_t size) throw() {
226 return AllocateHeap(size, mtInternal);
227 }
// Array form delegates to the scalar operator new so both forms share the
// same C-heap/mtInternal allocation policy.
228 void* ObjectMonitor::operator new[] (size_t size) throw() {
229 return operator new (size);
230 }
// Release storage obtained via operator new above back to the C heap.
231 void ObjectMonitor::operator delete(void* p) {
232 FreeHeap(p);
233 }
// Array form delegates to the scalar operator delete, mirroring operator new[].
234 void ObjectMonitor::operator delete[] (void *p) {
235 operator delete(p);
236 }
237
238 // -----------------------------------------------------------------------------
239 // Enter support
240
241 void ObjectMonitor::enter(TRAPS) {
242 ADIM_guarantee(_ref_count > 0, "must be positive: ref_count=%d", _ref_count);
243
244 // The following code is ordered to check the most common cases first
245 // and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors.
246 Thread * const Self = THREAD;
247
248 void * cur = Atomic::cmpxchg(Self, &_owner, (void*)NULL);
249 if (cur == NULL) {
250 assert(_recursions == 0, "invariant");
251 return;
252 }
253
254 if (cur == Self) {
255 // TODO-FIXME: check for integer overflow! BUGID 6557169.
256 _recursions++;
257 return;
258 }
259
260 if (Self->is_lock_owned ((address)cur)) {
261 assert(_recursions == 0, "internal state error");
262 _recursions = 1;
263 // Commute owner from a thread-specific on-stack BasicLockObject address to
264 // a full-fledged "Thread *".
265 _owner = Self;
266 return;
267 }
268
269 // We've encountered genuine contention.
270 assert(Self->_Stalled == 0, "invariant");
271 Self->_Stalled = intptr_t(this);
272
273 // Try one round of spinning *before* enqueueing Self
274 // and before going through the awkward and expensive state
275 // transitions. The following spin is strictly optional ...
276 // Note that if we acquire the monitor from an initial spin
277 // we forgo posting JVMTI events and firing DTRACE probes.
278 if (TrySpin(Self) > 0) {
279 assert(_owner == Self, "must be Self: owner=" INTPTR_FORMAT, p2i(_owner));
280 assert(_recursions == 0, "must be 0: recursions=" INTPTR_FORMAT,
281 _recursions);
282 assert(((oop)object())->mark() == markOopDesc::encode(this),
283 "object mark must match encoded this: mark=" INTPTR_FORMAT
284 ", encoded this=" INTPTR_FORMAT, p2i(((oop)object())->mark()),
285 p2i(markOopDesc::encode(this)));
286 Self->_Stalled = 0;
287 return;
288 }
420 // Install the displaced mark word (dmw) of a deflating ObjectMonitor
421 // into the header of the object associated with the monitor. This
422 // idempotent method is called by a thread that is deflating a
423 // monitor and by other threads that have detected a race with the
424 // deflation process.
//
// Restoration protocol (lock-free, multiple racing callers):
//   1. One racer CAS-marks _header to claim the protocol; the CAS winner
//      holds the original unmarked dmw.
//   2. Every racer that sees a marked dmw unmarks its local copy.
//   3. Every racer attempts to CAS the dmw back into the object's header;
//      exactly one succeeds, the rest are benign no-ops.
//   4. The thread that created marked_dmw clears _header afterwards so
//      later racers can bail out early at the NULL check.
425 void ObjectMonitor::install_displaced_markword_in_object(const oop obj) {
426 // This function must only be called when (owner == DEFLATER_MARKER
427 // && ref_count <= 0), but we can't guarantee that here because
428 // those values could change when the ObjectMonitor gets moved from
429 // the global free list to a per-thread free list.
430
431 guarantee(obj != NULL, "must be non-NULL");
432 if (object() != obj) {
433 // ObjectMonitor's object ref no longer refers to the target object
434 // so the object's header has already been restored.
435 return;
436 }
437
438 markOop dmw = header();
439 if (dmw == NULL) {
440 // ObjectMonitor's header/dmw has been cleared by the deflating
441 // thread so the object's header has already been restored.
442 return;
443 }
444
445 // A non-NULL dmw has to be either neutral (not locked and not marked)
446 // or is already participating in this restoration protocol.
447 assert(dmw->is_neutral() || (dmw->is_marked() && dmw->hash() == 0),
448 "failed precondition: dmw=" INTPTR_FORMAT, p2i(dmw));
449
450 markOop marked_dmw = NULL;
451 if (!dmw->is_marked() && dmw->hash() == 0) {
452 // This dmw has not yet started the restoration protocol so we
453 // mark a copy of the dmw to begin the protocol.
454 // Note: A dmw with a hashcode does not take this code path.
455 marked_dmw = dmw->set_marked();
456
457 // All of the callers to this function can be racing with each
458 // other trying to update the _header field.
459 dmw = (markOop) Atomic::cmpxchg(marked_dmw, &_header, dmw);
// cmpxchg yields the prior _header value: NULL (already cleared), the
// unmarked dmw (this thread won the CAS), or an already-marked dmw
// (another racer won first).
460 if (dmw == NULL) {
461 // ObjectMonitor's header/dmw has been cleared by the deflating
462 // thread so the object's header has already been restored.
463 return;
464 }
465 // The _header field is now marked. The winner's 'dmw' variable
466 // contains the original, unmarked header/dmw value and any
467 // losers have a marked header/dmw value that will be cleaned
468 // up below.
469 }
470
471 if (dmw->is_marked()) {
472 // Clear the mark from the header/dmw copy in preparation for
473 // possible restoration from this thread.
474 assert(dmw->hash() == 0, "hashcode must be 0: dmw=" INTPTR_FORMAT,
475 p2i(dmw));
476 dmw = dmw->set_unmarked();
477 }
478 assert(dmw->is_neutral(), "must be neutral: dmw=" INTPTR_FORMAT, p2i(dmw));
479
480 // Install displaced mark word if the object's header still points
481 // to this ObjectMonitor. All racing callers to this function will
482 // reach this point, but only one can win.
// The CAS result is intentionally ignored: a failure means some other
// racer already restored the header, which is the desired end state.
483 obj->cas_set_mark(dmw, markOopDesc::encode(this));
484
485 // Note: It does not matter which thread restored the header/dmw
486 // into the object's header. The thread deflating the monitor just
487 // wanted the object's header restored and it is. The threads that
488 // detected a race with the deflation process also wanted the
489 // object's header restored before they retry their operation and
490 // because it is restored they will only retry once.
491
492 if (marked_dmw != NULL) {
493 // Clear _header to NULL if it is still marked_dmw so a racing
494 // install_displaced_markword_in_object() can bail out sooner.
// Ignore the result here too: if _header is no longer marked_dmw,
// someone else already advanced the protocol.
495 Atomic::cmpxchg((markOop)NULL, &_header, marked_dmw);
496 }
497 }
498
499 // Convert the fields used by is_busy() to a string that can be
500 // used for diagnostic output.
501 const char* ObjectMonitor::is_busy_to_string(stringStream* ss) {
502 ss->print("is_busy: contentions=%d, waiters=%d, owner=" INTPTR_FORMAT
503 ", cxq=" INTPTR_FORMAT ", EntryList=" INTPTR_FORMAT, _contentions,
504 _waiters, p2i(_owner), p2i(_cxq), p2i(_EntryList));
505 return ss->base();
506 }
507
508 #define MAX_RECHECK_INTERVAL 1000
509
510 void ObjectMonitor::EnterI(TRAPS) {
511 ADIM_guarantee(_ref_count > 0, "must be positive: ref_count=%d", _ref_count);
512
513 Thread * const Self = THREAD;
514 assert(Self->is_Java_thread(), "invariant");
515 assert(((JavaThread *) Self)->thread_state() == _thread_blocked, "invariant");
516
517 // Try the lock - TATAS
518 if (TryLock (Self) > 0) {
519 assert(_succ != Self, "invariant");
520 assert(_owner == Self, "invariant");
521 assert(_Responsible != Self, "invariant");
522 return;
523 }
524
525 if (_owner == DEFLATER_MARKER) {
526 // The deflation protocol finished the first part (setting owner), but
527 // it failed the second part (making ref_count negative) and bailed.
528 if (Atomic::cmpxchg(Self, &_owner, DEFLATER_MARKER) == DEFLATER_MARKER) {
529 // Acquired the monitor.
530 assert(_succ != Self, "invariant");
531 assert(_Responsible != Self, "invariant");
532 return;
533 }
534 }
535
536 assert(InitDone, "Unexpectedly not initialized");
537
538 // We try one round of spinning *before* enqueueing Self.
539 //
540 // If the _owner is ready but OFFPROC we could use a YieldTo()
541 // operation to donate the remainder of this thread's quantum
542 // to the owner. This has subtle but beneficial affinity
543 // effects.
544
545 if (TrySpin(Self) > 0) {
546 assert(_owner == Self, "invariant");
547 assert(_succ != Self, "invariant");
548 assert(_Responsible != Self, "invariant");
549 return;
550 }
551
552 // The Spin failed -- Enqueue and park the thread ...
553 assert(_succ != Self, "invariant");
554 assert(_owner != Self, "invariant");
632
633 for (;;) {
634
635 if (TryLock(Self) > 0) break;
636 assert(_owner != Self, "invariant");
637
638 // park self
639 if (_Responsible == Self) {
640 Self->_ParkEvent->park((jlong) recheckInterval);
641 // Increase the recheckInterval, but clamp the value.
642 recheckInterval *= 8;
643 if (recheckInterval > MAX_RECHECK_INTERVAL) {
644 recheckInterval = MAX_RECHECK_INTERVAL;
645 }
646 } else {
647 Self->_ParkEvent->park();
648 }
649
650 if (TryLock(Self) > 0) break;
651
652 if (_owner == DEFLATER_MARKER) {
653 // The deflation protocol finished the first part (setting owner), but
654 // it failed the second part (making ref_count negative) and bailed.
655 if (Atomic::cmpxchg(Self, &_owner, DEFLATER_MARKER) == DEFLATER_MARKER) {
656 // Acquired the monitor.
657 break;
658 }
659 }
660
661 // The lock is still contested.
662 // Keep a tally of the # of futile wakeups.
663 // Note that the counter is not protected by a lock or updated by atomics.
664 // That is by design - we trade "lossy" counters which are exposed to
665 // races during updates for a lower probe effect.
666
667 // This PerfData object can be used in parallel with a safepoint.
668 // See the work around in PerfDataManager::destroy().
669 OM_PERFDATA_OP(FutileWakeups, inc());
670 ++nWakeups;
671
672 // Assuming this is not a spurious wakeup we'll normally find _succ == Self.
673 // We can defer clearing _succ until after the spin completes
674 // TrySpin() must tolerate being called with _succ == Self.
675 // Try yet another round of adaptive spinning.
676 if (TrySpin(Self) > 0) break;
677
678 // We can find that we were unpark()ed and redesignated _succ while
679 // we were spinning. That's harmless. If we iterate and call park(),
745 // the lock. The barrier ensures that changes to monitor meta-data and data
746 // protected by the lock will be visible before we release the lock, and
747 // therefore before some other thread (CPU) has a chance to acquire the lock.
748 // See also: http://gee.cs.oswego.edu/dl/jmm/cookbook.html.
749 //
750 // Critically, any prior STs to _succ or EntryList must be visible before
751 // the ST of null into _owner in the *subsequent* (following) corresponding
752 // monitorexit. Recall too, that in 1-0 mode monitorexit does not necessarily
753 // execute a serializing instruction.
754
755 return;
756 }
757
758 // ReenterI() is a specialized inline form of the latter half of the
759 // contended slow-path from EnterI(). We use ReenterI() only for
760 // monitor reentry in wait().
761 //
762 // In the future we should reconcile EnterI() and ReenterI().
763
764 void ObjectMonitor::ReenterI(Thread * Self, ObjectWaiter * SelfNode) {
765 ADIM_guarantee(_ref_count > 0, "must be positive: ref_count=%d", _ref_count);
766
767 assert(Self != NULL, "invariant");
768 assert(SelfNode != NULL, "invariant");
769 assert(SelfNode->_thread == Self, "invariant");
770 assert(_waiters > 0, "invariant");
771 assert(((oop)(object()))->mark() == markOopDesc::encode(this), "invariant");
772 assert(((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant");
773 JavaThread * jt = (JavaThread *) Self;
774
775 int nWakeups = 0;
776 for (;;) {
777 ObjectWaiter::TStates v = SelfNode->TState;
778 guarantee(v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant");
779 assert(_owner != Self, "invariant");
780
781 if (TryLock(Self) > 0) break;
782 if (TrySpin(Self) > 0) break;
783
784 if (_owner == DEFLATER_MARKER) {
785 // The deflation protocol finished the first part (setting owner), but
786 // it failed the second part (making ref_count negative) and bailed.
787 if (Atomic::cmpxchg(Self, &_owner, DEFLATER_MARKER) == DEFLATER_MARKER) {
788 // Acquired the monitor.
789 break;
790 }
791 }
792
793 // State transition wrappers around park() ...
794 // ReenterI() wisely defers state transitions until
795 // it's clear we must park the thread.
796 {
797 OSThreadContendState osts(Self->osthread());
798 ThreadBlockInVM tbivm(jt);
799
800 // cleared by handle_special_suspend_equivalent_condition()
801 // or java_suspend_self()
802 jt->set_suspend_equivalent();
803 Self->_ParkEvent->park();
804
805 // were we externally suspended while we were waiting?
806 for (;;) {
807 if (!ExitSuspendEquivalent(jt)) break;
808 if (_succ == Self) { _succ = NULL; OrderAccess::fence(); }
809 jt->java_suspend_self();
810 jt->set_suspend_equivalent();
811 }
2066 // increment the ref_count. This function should only be called if
2067 // the caller has verified mark->has_monitor() == true. The object
2068 // parameter is needed to verify that ObjectMonitor* has not been
2069 // deflated and reused for another object.
2070 //
2071 // This function returns true if the ObjectMonitor* has been safely
2072 // saved. This function returns false if we have lost a race with
2073 // async deflation; the caller should retry as appropriate.
2074 //
2075 bool ObjectMonitorHandle::save_om_ptr(oop object, markOop mark) {
2076 guarantee(mark->has_monitor(), "sanity check: mark=" INTPTR_FORMAT,
2077 p2i(mark));
2078
2079 ObjectMonitor * om_ptr = mark->monitor();
2080 om_ptr->inc_ref_count();
2081
2082 if (AsyncDeflateIdleMonitors) {
2083 // Race here if monitor is not owned! The above ref_count bump
2084 // will cause subsequent async deflation to skip it. However,
2085 // previous or concurrent async deflation is a race.
2086 if (om_ptr->_owner == DEFLATER_MARKER && om_ptr->ref_count() <= 0) {
2087 // Async deflation is in progress and our ref_count increment
2088 // above lost the race to async deflation. Attempt to restore
2089 // the header/dmw to the object's header so that we only retry
2090 // once if the deflater thread happens to be slow.
2091 om_ptr->install_displaced_markword_in_object(object);
2092 om_ptr->dec_ref_count();
2093 return false;
2094 }
2095 // The ObjectMonitor could have been deflated and reused for
2096 // another object before we bumped the ref_count so make sure
2097 // our object still refers to this ObjectMonitor.
2098 const markOop tmp = object->mark();
2099 if (!tmp->has_monitor() || tmp->monitor() != om_ptr) {
2100 // Async deflation and reuse won the race so we have to retry.
2101 // Skip object header restoration since that's already done.
2102 om_ptr->dec_ref_count();
2103 return false;
2104 }
2105 }
2106
|
222 // * See also http://blogs.sun.com/dave
223
224
// Monitors are VM-internal data structures; their storage comes from the
// C heap (tracked under mtInternal), not from the Java heap.
// NOTE(review): despite throw(), AllocateHeap presumably never returns
// NULL (it aborts on exhaustion) — confirm against allocation.hpp.
225 void* ObjectMonitor::operator new (size_t size) throw() {
226 return AllocateHeap(size, mtInternal);
227 }
// Array allocation reuses the scalar operator new so both paths share one
// allocation policy.
228 void* ObjectMonitor::operator new[] (size_t size) throw() {
229 return operator new (size);
230 }
// Return storage allocated by the operator new above to the C heap.
231 void ObjectMonitor::operator delete(void* p) {
232 FreeHeap(p);
233 }
// Array deallocation reuses the scalar operator delete, mirroring new[].
234 void ObjectMonitor::operator delete[] (void *p) {
235 operator delete(p);
236 }
237
238 // -----------------------------------------------------------------------------
239 // Enter support
240
241 void ObjectMonitor::enter(TRAPS) {
242 ADIM_guarantee(ref_count() > 0, "must be positive: ref_count=%d", ref_count());
243
244 // The following code is ordered to check the most common cases first
245 // and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors.
246 Thread * const Self = THREAD;
247
248 void * cur = Atomic::cmpxchg(Self, &_owner, (void*)NULL);
249 if (cur == NULL) {
250 assert(_recursions == 0, "invariant");
251 return;
252 }
253
254 if (cur == Self) {
255 // TODO-FIXME: check for integer overflow! BUGID 6557169.
256 _recursions++;
257 return;
258 }
259
260 if (Self->is_lock_owned ((address)cur)) {
261 assert(_recursions == 0, "internal state error");
262 _recursions = 1;
263 // Commute owner from a thread-specific on-stack BasicLockObject address to
264 // a full-fledged "Thread *".
265 _owner = Self;
266 return;
267 }
268
269 if (AsyncDeflateIdleMonitors &&
270 Atomic::cmpxchg(Self, &_owner, DEFLATER_MARKER) == DEFLATER_MARKER) {
271 // The deflation protocol finished the first part (setting owner),
272 // but it failed the second part (making ref_count negative) and
273 // bailed. Or the ObjectMonitor was async deflated and reused.
274 // Acquired the monitor.
275 assert(_recursions == 0, "invariant");
276 return;
277 }
278
279 // We've encountered genuine contention.
280 assert(Self->_Stalled == 0, "invariant");
281 Self->_Stalled = intptr_t(this);
282
283 // Try one round of spinning *before* enqueueing Self
284 // and before going through the awkward and expensive state
285 // transitions. The following spin is strictly optional ...
286 // Note that if we acquire the monitor from an initial spin
287 // we forgo posting JVMTI events and firing DTRACE probes.
288 if (TrySpin(Self) > 0) {
289 assert(_owner == Self, "must be Self: owner=" INTPTR_FORMAT, p2i(_owner));
290 assert(_recursions == 0, "must be 0: recursions=" INTPTR_FORMAT,
291 _recursions);
292 assert(((oop)object())->mark() == markOopDesc::encode(this),
293 "object mark must match encoded this: mark=" INTPTR_FORMAT
294 ", encoded this=" INTPTR_FORMAT, p2i(((oop)object())->mark()),
295 p2i(markOopDesc::encode(this)));
296 Self->_Stalled = 0;
297 return;
298 }
430 // Install the displaced mark word (dmw) of a deflating ObjectMonitor
431 // into the header of the object associated with the monitor. This
432 // idempotent method is called by a thread that is deflating a
433 // monitor and by other threads that have detected a race with the
434 // deflation process.
//
// Restoration protocol (lock-free, multiple racing callers):
//   1. One racer CAS-marks _header to claim the protocol; the CAS winner
//      holds the original unmarked dmw.
//   2. Every racer that sees a marked dmw unmarks its local copy.
//   3. Every racer attempts to CAS the dmw back into the object's header;
//      exactly one succeeds, the rest are benign no-ops.
//   4. The thread that created marked_dmw clears _header afterwards so
//      later racers can bail out early at the NULL check.
435 void ObjectMonitor::install_displaced_markword_in_object(const oop obj) {
436 // This function must only be called when (owner == DEFLATER_MARKER
437 // && ref_count <= 0), but we can't guarantee that here because
438 // those values could change when the ObjectMonitor gets moved from
439 // the global free list to a per-thread free list.
440
441 guarantee(obj != NULL, "must be non-NULL");
442 if (object() != obj) {
443 // ObjectMonitor's object ref no longer refers to the target object
444 // so the object's header has already been restored.
445 return;
446 }
447
448 markOop dmw = header();
449 if (dmw == NULL) {
450 // ObjectMonitor's header/dmw has been cleared so the object's
451 // header has already been restored.
452 return;
453 }
454
455 // A non-NULL dmw has to be either neutral (not locked and not marked)
456 // or is already participating in this restoration protocol.
457 assert(dmw->is_neutral() || (dmw->is_marked() && dmw->hash() == 0),
458 "failed precondition: dmw=" INTPTR_FORMAT, p2i(dmw));
459
460 markOop marked_dmw = NULL;
461 if (!dmw->is_marked() && dmw->hash() == 0) {
462 // This dmw has not yet started the restoration protocol so we
463 // mark a copy of the dmw to begin the protocol.
464 // Note: A dmw with a hashcode does not take this code path.
465 marked_dmw = dmw->set_marked();
466
467 // All of the callers to this function can be racing with each
468 // other trying to update the _header field.
469 dmw = (markOop) Atomic::cmpxchg(marked_dmw, &_header, dmw);
// cmpxchg yields the prior _header value: NULL (already cleared), the
// unmarked dmw (this thread won the CAS), or an already-marked dmw
// (another racer won first).
470 if (dmw == NULL) {
471 // ObjectMonitor's header/dmw has been cleared so the object's
472 // header has already been restored.
473 return;
474 }
475 // The _header field is now marked. The winner's 'dmw' variable
476 // contains the original, unmarked header/dmw value and any
477 // losers have a marked header/dmw value that will be cleaned
478 // up below.
479 }
480
481 if (dmw->is_marked()) {
482 // Clear the mark from the header/dmw copy in preparation for
483 // possible restoration from this thread.
484 assert(dmw->hash() == 0, "hashcode must be 0: dmw=" INTPTR_FORMAT,
485 p2i(dmw));
486 dmw = dmw->set_unmarked();
487 }
488 assert(dmw->is_neutral(), "must be neutral: dmw=" INTPTR_FORMAT, p2i(dmw));
489
490 // Install displaced mark word if the object's header still points
491 // to this ObjectMonitor. All racing callers to this function will
492 // reach this point, but only one can win.
// The CAS result is intentionally ignored: a failure means some other
// racer already restored the header, which is the desired end state.
493 obj->cas_set_mark(dmw, markOopDesc::encode(this));
494
495 // Note: It does not matter which thread restored the header/dmw
496 // into the object's header. The thread deflating the monitor just
497 // wanted the object's header restored and it is. The threads that
498 // detected a race with the deflation process also wanted the
499 // object's header restored before they retry their operation and
500 // because it is restored they will only retry once.
501
502 if (marked_dmw != NULL) {
503 // Clear _header to NULL if it is still marked_dmw so a racing
504 // install_displaced_markword_in_object() can bail out sooner.
// Ignore the result here too: if _header is no longer marked_dmw,
// someone else already advanced the protocol.
505 Atomic::cmpxchg((markOop)NULL, &_header, marked_dmw);
506 }
507 }
508
509 // Convert the fields used by is_busy() to a string that can be
510 // used for diagnostic output.
511 const char* ObjectMonitor::is_busy_to_string(stringStream* ss) {
512 ss->print("is_busy: contentions=%d, waiters=%d, ", _contentions, _waiters);
513 if (!AsyncDeflateIdleMonitors) {
514 ss->print("owner=" INTPTR_FORMAT, p2i(_owner));
515 } else if (_owner != DEFLATER_MARKER) {
516 ss->print("owner=" INTPTR_FORMAT, p2i(_owner));
517 } else {
518 ss->print("owner=" INTPTR_FORMAT, NULL);
519 }
520 ss->print(", cxq=" INTPTR_FORMAT ", EntryList=" INTPTR_FORMAT, p2i(_cxq),
521 p2i(_EntryList));
522 return ss->base();
523 }
524
525 #define MAX_RECHECK_INTERVAL 1000
526
527 void ObjectMonitor::EnterI(TRAPS) {
528 ADIM_guarantee(ref_count() > 0, "must be positive: ref_count=%d", ref_count());
529
530 Thread * const Self = THREAD;
531 assert(Self->is_Java_thread(), "invariant");
532 assert(((JavaThread *) Self)->thread_state() == _thread_blocked, "invariant");
533
534 // Try the lock - TATAS
535 if (TryLock (Self) > 0) {
536 assert(_succ != Self, "invariant");
537 assert(_owner == Self, "invariant");
538 assert(_Responsible != Self, "invariant");
539 return;
540 }
541
542 if (AsyncDeflateIdleMonitors &&
543 Atomic::cmpxchg(Self, &_owner, DEFLATER_MARKER) == DEFLATER_MARKER) {
544 // The deflation protocol finished the first part (setting owner),
545 // but it failed the second part (making ref_count negative) and
546 // bailed. Or the ObjectMonitor was async deflated and reused.
547 // Acquired the monitor.
548 assert(_succ != Self, "invariant");
549 assert(_Responsible != Self, "invariant");
550 return;
551 }
552
553 assert(InitDone, "Unexpectedly not initialized");
554
555 // We try one round of spinning *before* enqueueing Self.
556 //
557 // If the _owner is ready but OFFPROC we could use a YieldTo()
558 // operation to donate the remainder of this thread's quantum
559 // to the owner. This has subtle but beneficial affinity
560 // effects.
561
562 if (TrySpin(Self) > 0) {
563 assert(_owner == Self, "invariant");
564 assert(_succ != Self, "invariant");
565 assert(_Responsible != Self, "invariant");
566 return;
567 }
568
569 // The Spin failed -- Enqueue and park the thread ...
570 assert(_succ != Self, "invariant");
571 assert(_owner != Self, "invariant");
649
650 for (;;) {
651
652 if (TryLock(Self) > 0) break;
653 assert(_owner != Self, "invariant");
654
655 // park self
656 if (_Responsible == Self) {
657 Self->_ParkEvent->park((jlong) recheckInterval);
658 // Increase the recheckInterval, but clamp the value.
659 recheckInterval *= 8;
660 if (recheckInterval > MAX_RECHECK_INTERVAL) {
661 recheckInterval = MAX_RECHECK_INTERVAL;
662 }
663 } else {
664 Self->_ParkEvent->park();
665 }
666
667 if (TryLock(Self) > 0) break;
668
669 if (AsyncDeflateIdleMonitors &&
670 Atomic::cmpxchg(Self, &_owner, DEFLATER_MARKER) == DEFLATER_MARKER) {
671 // The deflation protocol finished the first part (setting owner),
672 // but it failed the second part (making ref_count negative) and
673 // bailed. Or the ObjectMonitor was async deflated and reused.
674 // Acquired the monitor.
675 break;
676 }
677
678 // The lock is still contested.
679 // Keep a tally of the # of futile wakeups.
680 // Note that the counter is not protected by a lock or updated by atomics.
681 // That is by design - we trade "lossy" counters which are exposed to
682 // races during updates for a lower probe effect.
683
684 // This PerfData object can be used in parallel with a safepoint.
685 // See the work around in PerfDataManager::destroy().
686 OM_PERFDATA_OP(FutileWakeups, inc());
687 ++nWakeups;
688
689 // Assuming this is not a spurious wakeup we'll normally find _succ == Self.
690 // We can defer clearing _succ until after the spin completes
691 // TrySpin() must tolerate being called with _succ == Self.
692 // Try yet another round of adaptive spinning.
693 if (TrySpin(Self) > 0) break;
694
695 // We can find that we were unpark()ed and redesignated _succ while
696 // we were spinning. That's harmless. If we iterate and call park(),
762 // the lock. The barrier ensures that changes to monitor meta-data and data
763 // protected by the lock will be visible before we release the lock, and
764 // therefore before some other thread (CPU) has a chance to acquire the lock.
765 // See also: http://gee.cs.oswego.edu/dl/jmm/cookbook.html.
766 //
767 // Critically, any prior STs to _succ or EntryList must be visible before
768 // the ST of null into _owner in the *subsequent* (following) corresponding
769 // monitorexit. Recall too, that in 1-0 mode monitorexit does not necessarily
770 // execute a serializing instruction.
771
772 return;
773 }
774
775 // ReenterI() is a specialized inline form of the latter half of the
776 // contended slow-path from EnterI(). We use ReenterI() only for
777 // monitor reentry in wait().
778 //
779 // In the future we should reconcile EnterI() and ReenterI().
780
781 void ObjectMonitor::ReenterI(Thread * Self, ObjectWaiter * SelfNode) {
782 ADIM_guarantee(ref_count() > 0, "must be positive: ref_count=%d", ref_count());
783
784 assert(Self != NULL, "invariant");
785 assert(SelfNode != NULL, "invariant");
786 assert(SelfNode->_thread == Self, "invariant");
787 assert(_waiters > 0, "invariant");
788 assert(((oop)(object()))->mark() == markOopDesc::encode(this), "invariant");
789 assert(((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant");
790 JavaThread * jt = (JavaThread *) Self;
791
792 int nWakeups = 0;
793 for (;;) {
794 ObjectWaiter::TStates v = SelfNode->TState;
795 guarantee(v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant");
796 assert(_owner != Self, "invariant");
797
798 if (TryLock(Self) > 0) break;
799 if (TrySpin(Self) > 0) break;
800
801 if (AsyncDeflateIdleMonitors &&
802 Atomic::cmpxchg(Self, &_owner, DEFLATER_MARKER) == DEFLATER_MARKER) {
803 // The deflation protocol finished the first part (setting owner),
804 // but it failed the second part (making ref_count negative) and
805 // bailed. Or the ObjectMonitor was async deflated and reused.
806 // Acquired the monitor.
807 break;
808 }
809
810 // State transition wrappers around park() ...
811 // ReenterI() wisely defers state transitions until
812 // it's clear we must park the thread.
813 {
814 OSThreadContendState osts(Self->osthread());
815 ThreadBlockInVM tbivm(jt);
816
817 // cleared by handle_special_suspend_equivalent_condition()
818 // or java_suspend_self()
819 jt->set_suspend_equivalent();
820 Self->_ParkEvent->park();
821
822 // were we externally suspended while we were waiting?
823 for (;;) {
824 if (!ExitSuspendEquivalent(jt)) break;
825 if (_succ == Self) { _succ = NULL; OrderAccess::fence(); }
826 jt->java_suspend_self();
827 jt->set_suspend_equivalent();
828 }
2083 // increment the ref_count. This function should only be called if
2084 // the caller has verified mark->has_monitor() == true. The object
2085 // parameter is needed to verify that ObjectMonitor* has not been
2086 // deflated and reused for another object.
2087 //
2088 // This function returns true if the ObjectMonitor* has been safely
2089 // saved. This function returns false if we have lost a race with
2090 // async deflation; the caller should retry as appropriate.
2091 //
2092 bool ObjectMonitorHandle::save_om_ptr(oop object, markOop mark) {
2093 guarantee(mark->has_monitor(), "sanity check: mark=" INTPTR_FORMAT,
2094 p2i(mark));
2095
2096 ObjectMonitor * om_ptr = mark->monitor();
2097 om_ptr->inc_ref_count();
2098
2099 if (AsyncDeflateIdleMonitors) {
2100 // Race here if monitor is not owned! The above ref_count bump
2101 // will cause subsequent async deflation to skip it. However,
2102 // previous or concurrent async deflation is a race.
2103 if (om_ptr->owner_is_DEFLATER_MARKER() && om_ptr->ref_count() <= 0) {
2104 // Async deflation is in progress and our ref_count increment
2105 // above lost the race to async deflation. Attempt to restore
2106 // the header/dmw to the object's header so that we only retry
2107 // once if the deflater thread happens to be slow.
2108 om_ptr->install_displaced_markword_in_object(object);
2109 om_ptr->dec_ref_count();
2110 return false;
2111 }
2112 // The ObjectMonitor could have been deflated and reused for
2113 // another object before we bumped the ref_count so make sure
2114 // our object still refers to this ObjectMonitor.
2115 const markOop tmp = object->mark();
2116 if (!tmp->has_monitor() || tmp->monitor() != om_ptr) {
2117 // Async deflation and reuse won the race so we have to retry.
2118 // Skip object header restoration since that's already done.
2119 om_ptr->dec_ref_count();
2120 return false;
2121 }
2122 }
2123
|