/*
 * Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_RUNTIME_OBJECTMONITOR_INLINE_HPP
#define SHARE_RUNTIME_OBJECTMONITOR_INLINE_HPP

#include "runtime/atomic.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/orderAccess.hpp"

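// Returns 1 if THREAD owns this monitor, either directly (THREAD == _owner)
// or because _owner is a stack lock address owned by THREAD; returns 0
// otherwise.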
inline intptr_t ObjectMonitor::is_entered(TRAPS) const {
  if (THREAD == _owner || THREAD->is_lock_owned((address) _owner)) {
    return 1;
  }
  return 0;
}

inline markOop ObjectMonitor::header() const {
  return _header;
}

inline volatile markOop* ObjectMonitor::header_addr() {
  assert((intptr_t)this == (intptr_t)&_header, "sync code expects this");
  return &_header;
}

inline void ObjectMonitor::set_header(markOop hdr) {
  _header = hdr;
}

inline jint ObjectMonitor::count() const {
  return _count;
}

inline jint ObjectMonitor::waiters() const {
  return _waiters;
}

// Returns NULL if DEFLATER_MARKER is observed.
inline void* ObjectMonitor::owner() const {
  void* owner = _owner;
  return owner != DEFLATER_MARKER ? owner : NULL;
}

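// clear() verifies the state that clear_using_JT() cannot assume
// (_count == 0 and _owner == NULL) and then delegates to clear_using_JT()
// for the remaining checks and the actual clearing.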
inline void ObjectMonitor::clear() {
  assert(_count == 0, "Fatal logic error in ObjectMonitor count!");
  assert(_owner == NULL, "Fatal logic error in ObjectMonitor owner!");

  clear_using_JT();
}

inline void ObjectMonitor::clear_using_JT() {
  // When clearing using a JavaThread, we leave _owner == DEFLATER_MARKER
  // and _count < 0 to force any racing threads to retry. Unlike other
  // *_using_JT() functions, we cannot assert AsyncDeflateIdleMonitors
  // or Thread::current()->is_Java_thread() because clear() calls this
  // function for the rest of its checks.

  assert(_header != NULL, "Fatal logic error in ObjectMonitor header!");
  assert(_waiters == 0, "Fatal logic error in ObjectMonitor waiters!");
  assert(_recursions == 0, "Fatal logic error in ObjectMonitor recursions!");
  assert(_object != NULL, "Fatal logic error in ObjectMonitor object!");
  // Do not assert _ref_count == 0 here because a racing thread could
  // increment _ref_count, observe _owner == DEFLATER_MARKER and then
  // decrement _ref_count.

  set_allocation_state(Free);
  _header = NULL;
  _object = NULL;
  // Do not clear _ref_count here because _ref_count is for indicating
  // that the ObjectMonitor* is in use which is orthogonal to whether
  // the ObjectMonitor itself is in use for a locking operation.
}

inline void* ObjectMonitor::object() const {
  return _object;
}

inline void* ObjectMonitor::object_addr() {
  return (void *)(&_object);
}

inline void ObjectMonitor::set_object(void* obj) {
  _object = obj;
}

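// Returns true if THREAD owns this monitor, converting ownership recorded
// as a stack lock address back to THREAD if necessary (regaining the
// inflated monitor); otherwise check_slow() handles the failure and false
// is returned.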
inline bool ObjectMonitor::check(TRAPS) {
  if (THREAD != _owner) {
    if (THREAD->is_lock_owned((address) _owner)) {
      _owner = THREAD;  // regain ownership of inflated monitor
      assert(_recursions == 0, "invariant");
    } else {
      check_slow(THREAD);
      return false;
    }
  }
  return true;
}

// Returns the number of threads contending for this monitor.
inline jint ObjectMonitor::contentions() const {
  return _count;
}

// Do NOT set _count = 0. There is a race such that _count could be set
// while inflating prior to setting _owner. Just use Atomic::inc/dec and
// assert that _count is 0 when the monitor is put on the free list.
inline void ObjectMonitor::set_owner(void* owner) {
  _owner = owner;
  _recursions = 0;
}

inline void ObjectMonitor::set_allocation_state(ObjectMonitor::AllocationState s) {
  _allocation_state = s;
}

inline ObjectMonitor::AllocationState ObjectMonitor::allocation_state() const {
  return _allocation_state;
}

inline bool ObjectMonitor::is_free() const {
  return _allocation_state == Free;
}

inline bool ObjectMonitor::is_active() const {
  return !is_free();
}

inline bool ObjectMonitor::is_old() const {
  return _allocation_state == Old;
}

inline bool ObjectMonitor::is_new() const {
  return _allocation_state == New;
}

inline void ObjectMonitor::dec_ref_count() {
  // The decrement needs to be MO_ACQ_REL. At the moment, the Atomic::dec
  // backend on PPC does not yet conform to these requirements. Therefore
  // the decrement is simulated with an Atomic::sub(1, &addr). Without
  // this MO_ACQ_REL Atomic::dec simulation, AsyncDeflateIdleMonitors is
  // not safe.
  Atomic::sub((jint)1, &_ref_count);
  guarantee(_ref_count >= 0, "sanity check: ref_count=%d", _ref_count);
}

inline void ObjectMonitor::inc_ref_count() {
  // The increment needs to be MO_SEQ_CST. At the moment, the Atomic::inc
  // backend on PPC does not yet conform to these requirements. Therefore
  // the increment is simulated with a load phi; cas phi + 1; loop.
  // Without this MO_SEQ_CST Atomic::inc simulation, AsyncDeflateIdleMonitors
  // is not safe.
  for (;;) {
    jint sample = OrderAccess::load_acquire(&_ref_count);
    guarantee(sample >= 0, "sanity check: sample=%d", (int)sample);
    if (Atomic::cmpxchg(sample + 1, &_ref_count, sample) == sample) {
      // Incremented _ref_count without interference.
      return;
    }
    // Implied else: Saw interference so loop and try again.
  }
}

inline jint ObjectMonitor::ref_count() const {
  return OrderAccess::load_acquire(&_ref_count);
}

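// Illustrative sketch (not part of this header, om is just an example
// ObjectMonitor* variable): code that wants to keep an ObjectMonitor*
// in use across an inspection pairs the increment and decrement around
// the access:
//
//   om->inc_ref_count();          // mark the ObjectMonitor* as in use
//   if (om->owner() != NULL) {    // owner() returns NULL for DEFLATER_MARKER
//     // ... examine the monitor ...
//   }
//   om->dec_ref_count();          // done with the ObjectMonitor*
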
#endif // SHARE_RUNTIME_OBJECTMONITOR_INLINE_HPP