/*
 * Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_RUNTIME_OBJECTMONITOR_INLINE_HPP
#define SHARE_RUNTIME_OBJECTMONITOR_INLINE_HPP

inline intptr_t ObjectMonitor::is_entered(TRAPS) const {
  if (THREAD == _owner || THREAD->is_lock_owned((address) _owner)) {
    return 1;
  }
  return 0;
}

inline markOop ObjectMonitor::header() const {
  return _header;
}

inline volatile markOop* ObjectMonitor::header_addr() {
  assert((intptr_t)this == (intptr_t)&_header, "sync code expects this");
  return &_header;
}

inline void ObjectMonitor::set_header(markOop hdr) {
  _header = hdr;
}

inline jint ObjectMonitor::count() const {
  return _count;
}

inline jint ObjectMonitor::waiters() const {
  return _waiters;
}

// Returns NULL if DEFLATER_MARKER is observed.
inline void* ObjectMonitor::owner() const {
  void* owner = _owner;
  return owner != DEFLATER_MARKER ? owner : NULL;
}

inline void ObjectMonitor::clear() {
  assert(_count == 0, "Fatal logic error in ObjectMonitor count!");
  assert(_owner == NULL, "Fatal logic error in ObjectMonitor owner!");

  clear_using_JT();
}

inline void ObjectMonitor::clear_using_JT() {
  // When clearing using a JavaThread, we leave _owner == DEFLATER_MARKER
  // and _count < 0 to force any racing threads to retry. Unlike other
  // *_using_JT() functions, we cannot assert AsyncDeflateIdleMonitors
  // or Thread::current()->is_Java_thread() because clear() calls this
  // function for the rest of its checks.

  assert(_header != NULL, "Fatal logic error in ObjectMonitor header!");
  assert(_waiters == 0, "Fatal logic error in ObjectMonitor waiters!");
  assert(_recursions == 0, "Fatal logic error in ObjectMonitor recursions!");
  assert(_object != NULL, "Fatal logic error in ObjectMonitor object!");
  // Do not assert _ref_count == 0 here because a racing thread could
  // increment _ref_count, observe _owner == DEFLATER_MARKER and then
  // decrement _ref_count.

  set_allocation_state(Free);
  _header = NULL;
  _object = NULL;
  // Do not clear _ref_count here because _ref_count is for indicating
  // that the ObjectMonitor* is in use which is orthogonal to whether
  // the ObjectMonitor itself is in use for a locking operation.
}

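// Illustrative sketch of how a racing thread is expected to use _ref_count
// under AsyncDeflateIdleMonitors, per the comments above: publish interest
// via inc_ref_count(), then re-check that the async deflater has not claimed
// or recycled the monitor. The helper name and exact checks below are
// hypothetical, not an API defined in this header; the real interaction
// lives with the monitor handle / synchronizer code.
//
//   // Returns true if 'mon' is still usable for 'obj'; false if the async
//   // deflater has claimed or recycled it and the caller must retry.
//   static bool try_pin_monitor(ObjectMonitor* mon, void* obj) {
//     mon->inc_ref_count();           // MO_SEQ_CST publish (see inc_ref_count())
//     if (mon->is_free() || mon->object() != obj) {
//       // clear_using_JT() has set the state to Free and cleared _object,
//       // or a racing deflation is in progress; undo the publish and back off.
//       mon->dec_ref_count();
//       return false;
//     }
//     return true;  // caller is responsible for a matching dec_ref_count()
//   }
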
inline void* ObjectMonitor::object() const {
  return _object;
}

inline void* ObjectMonitor::object_addr() {
  return (void*)(&_object);
}

inline void ObjectMonitor::set_object(void* obj) {
  _object = obj;
}

inline bool ObjectMonitor::check(TRAPS) {
  if (THREAD != _owner) {
    if (THREAD->is_lock_owned((address) _owner)) {
      _owner = THREAD;  // regain ownership of inflated monitor
      assert(_recursions == 0, "invariant");
    } else {
      check_slow(THREAD);
      return false;
    }
  }
  return true;
}

// Return the number of threads contending for this monitor.
inline jint ObjectMonitor::contentions() const {
  return _count;
}

// Do NOT set _count = 0. There is a race such that _count could
// be set while inflating prior to setting _owner.
// Just use Atomic::inc/dec and assert 0 when the monitor is put on the free list.
inline void ObjectMonitor::set_owner(void* owner) {
  _owner = owner;
  _recursions = 0;
}

inline void ObjectMonitor::set_allocation_state(ObjectMonitor::AllocationState s) {
  _allocation_state = s;
}

inline ObjectMonitor::AllocationState ObjectMonitor::allocation_state() const {
  return _allocation_state;
}

inline bool ObjectMonitor::is_free() const {
  return _allocation_state == Free;
}

inline bool ObjectMonitor::is_active() const {
  return !is_free();
}

inline bool ObjectMonitor::is_old() const {
  return _allocation_state == Old;
}

inline bool ObjectMonitor::is_new() const {
  return _allocation_state == New;
}

inline void ObjectMonitor::dec_ref_count() {
  // The decrement needs to be MO_ACQ_REL. At the moment, the Atomic::dec
  // backend on PPC does not yet conform to these requirements. Therefore
  // the decrement is simulated with an Atomic::sub(1, &addr). Without
  // this MO_ACQ_REL Atomic::dec simulation, AsyncDeflateIdleMonitors is
  // not safe.
  Atomic::sub((jint)1, &_ref_count);
  guarantee(_ref_count >= 0, "sanity check: ref_count=%d", _ref_count);
}

inline void ObjectMonitor::inc_ref_count() {
  // The increment needs to be MO_SEQ_CST. At the moment, the Atomic::inc
  // backend on PPC does not yet conform to these requirements. Therefore
  // the increment is simulated with a load phi; cas phi + 1; loop.
  // Without this MO_SEQ_CST Atomic::inc simulation, AsyncDeflateIdleMonitors
  // is not safe.
  for (;;) {
    jint sample = OrderAccess::load_acquire(&_ref_count);
    guarantee(sample >= 0, "sanity check: sample=%d", (int)sample);
    if (Atomic::cmpxchg(sample + 1, &_ref_count, sample) == sample) {
      // Incremented _ref_count without interference.
      return;
    }
    // Implied else: Saw interference so loop and try again.
  }
}

inline jint ObjectMonitor::ref_count() const {
  return OrderAccess::load_acquire(&_ref_count);
}

#endif // SHARE_RUNTIME_OBJECTMONITOR_INLINE_HPP