1 /* 2 * Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #ifndef SHARE_RUNTIME_OBJECTMONITOR_HPP 26 #define SHARE_RUNTIME_OBJECTMONITOR_HPP 27 28 #include "memory/allocation.hpp" 29 #include "memory/padded.hpp" 30 #include "runtime/os.hpp" 31 #include "runtime/park.hpp" 32 #include "runtime/perfData.hpp" 33 34 class ObjectMonitor; 35 36 // ObjectWaiter serves as a "proxy" or surrogate thread. 37 // TODO-FIXME: Eliminate ObjectWaiter and use the thread-specific 38 // ParkEvent instead. Beware, however, that the JVMTI code 39 // knows about ObjectWaiters, so we'll have to reconcile that code. 40 // See next_waiter(), first_waiter(), etc. 
41 42 class ObjectWaiter : public StackObj { 43 public: 44 enum TStates { TS_UNDEF, TS_READY, TS_RUN, TS_WAIT, TS_ENTER, TS_CXQ }; 45 enum Sorted { PREPEND, APPEND, SORTED }; 46 ObjectWaiter * volatile _next; 47 ObjectWaiter * volatile _prev; 48 Thread* _thread; 49 jlong _notifier_tid; 50 ParkEvent * _event; 51 volatile int _notified; 52 volatile TStates TState; 53 Sorted _Sorted; // List placement disposition 54 bool _active; // Contention monitoring is enabled 55 public: 56 ObjectWaiter(Thread* thread); 57 58 void wait_reenter_begin(ObjectMonitor *mon); 59 void wait_reenter_end(ObjectMonitor *mon); 60 }; 61 62 // The ObjectMonitor class implements the heavyweight version of a 63 // JavaMonitor. The lightweight BasicLock/stack lock version has been 64 // inflated into an ObjectMonitor. This inflation is typically due to 65 // contention or use of Object.wait(). 66 // 67 // WARNING: This is a very sensitive and fragile class. DO NOT make any 68 // changes unless you are fully aware of the underlying semantics. 69 // 70 // Class JvmtiRawMonitor currently inherits from ObjectMonitor so 71 // changes in this class must be careful to not break JvmtiRawMonitor. 72 // These two subsystems should be separated. 73 // 74 // ObjectMonitor Layout Overview/Highlights/Restrictions: 75 // 76 // - The _header field must be at offset 0 because the displaced header 77 // from markOop is stored there. We do not want markOop.hpp to include 78 // ObjectMonitor.hpp to avoid exposing ObjectMonitor everywhere. This 79 // means that ObjectMonitor cannot inherit from any other class nor can 80 // it use any virtual member functions. This restriction is critical to 81 // the proper functioning of the VM. 82 // - The _header and _owner fields should be separated by enough space 83 // to avoid false sharing due to parallel access by different threads. 84 // This is an advisory recommendation. 
// - The general layout of the fields in ObjectMonitor is:
//     _header
//     <lightly_used_fields>
//     <optional padding>
//     _owner
//     <remaining_fields>
// - The VM assumes write ordering and machine word alignment with
//   respect to the _owner field and the <remaining_fields> that can
//   be read in parallel by other threads.
// - Generally fields that are accessed closely together in time should
//   be placed proximally in space to promote data cache locality. That
//   is, temporal locality should condition spatial locality.
// - We have to balance avoiding false sharing with excessive invalidation
//   from coherence traffic. As such, we try to cluster fields that tend
//   to be _written_ at approximately the same time onto the same data
//   cache line.
// - We also have to balance the natural tension between minimizing
//   single threaded capacity misses with excessive multi-threaded
//   coherency misses. There is no single optimal layout for both
//   single-threaded and multi-threaded environments.
//
// - See TEST_VM(ObjectMonitor, sanity) gtest for how critical restrictions are
//   enforced.
// - Adjacent ObjectMonitors should be separated by enough space to avoid
//   false sharing. This is handled by the ObjectMonitor allocation code
//   in synchronizer.cpp. Also see TEST_VM(SynchronizerTest, sanity) gtest.
//
// Futures notes:
//   - Separating _owner from the <remaining_fields> by enough space to
//     avoid false sharing might be profitable. Given
//     http://blogs.oracle.com/dave/entry/cas_and_cache_trivia_invalidate
//     we know that the CAS in monitorenter will invalidate the line
//     underlying _owner. We want to avoid an L1 data cache miss on that
//     same line for monitorexit. Putting these <remaining_fields>:
//     _recursions, _EntryList, _cxq, and _succ, all of which may be
//     fetched in the inflated unlock path, on a different cache line
//     would make them immune to CAS-based invalidation from the _owner
//     field.
//
//   - The _recursions field should be of type int, or int32_t but not
//     intptr_t. There's no reason to use a 64-bit type for this field
//     in a 64-bit JVM.

class ObjectMonitor {
 public:
  // Status codes returned by/within the monitor operations.
  enum {
    OM_OK,                       // no error
    OM_SYSTEM_ERROR,             // operating system error
    OM_ILLEGAL_MONITOR_STATE,    // IllegalMonitorStateException
    OM_INTERRUPTED,              // Thread.interrupt()
    OM_TIMED_OUT                 // Object.wait() timed out
  };

 private:
  friend class ObjectMonitorHandle;
  friend class ObjectSynchronizer;
  friend class ObjectWaiter;
  friend class VMStructs;

  // _header MUST stay at offset 0 - see the layout restrictions above.
  volatile markOop _header;      // displaced object header word - mark
  void* volatile _object;        // backward object pointer - strong root
 public:
  ObjectMonitor* FreeNext;       // Free list linkage
 private:
  // Pad out the rest of the cache line so that _owner (below) does not
  // share a line with _header - see the layout notes above.
  DEFINE_PAD_MINUS_SIZE(0, DEFAULT_CACHE_LINE_SIZE,
                        sizeof(volatile markOop) + sizeof(void * volatile) +
                        sizeof(ObjectMonitor *));
 protected:                      // protected for JvmtiRawMonitor
  // Used by async deflation as a marker in the _owner field:
  #define DEFLATER_MARKER reinterpret_cast<void*>(-1)
  void * volatile _owner;        // pointer to owning thread OR BasicLock
  volatile jlong _previous_owner_tid;  // thread id of the previous owner of the monitor
  volatile intptr_t _recursions; // recursion count, 0 for first entry
  ObjectWaiter * volatile _EntryList;  // Threads blocked on entry or reentry.
                                       // The list is actually composed of WaitNodes,
                                       // acting as proxies for Threads.
 private:
  ObjectWaiter * volatile _cxq;  // LL of recently-arrived threads blocked on entry.
  Thread * volatile _succ;       // Heir presumptive thread - used for futile wakeup throttling
  Thread * volatile _Responsible;  // NOTE(review): appears to be part of the enter/exit
                                   // wakeup protocol - confirm semantics in objectMonitor.cpp

  volatile int _Spinner;         // for exit->spinner handoff optimization
  volatile int _SpinDuration;    // NOTE(review): presumably the adaptive spin budget
                                 // used by TrySpin() - confirm in objectMonitor.cpp

  volatile jint _contentions;    // Number of active contentions in enter(). It is used by is_busy()
                                 // along with other fields to determine if an ObjectMonitor can be
                                 // deflated. See ObjectSynchronizer::deflate_monitor().
 protected:
  ObjectWaiter * volatile _WaitSet; // LL of threads wait()ing on the monitor
  volatile jint _waiters;           // number of waiting threads
 private:
  volatile int _WaitSetLock;        // protects Wait Queue - simple spinlock
  volatile jint _ref_count;         // ref count for ObjectMonitor*
  // Lifecycle of a monitor in the allocation subsystem (see synchronizer.cpp):
  typedef enum {
    Free = 0,  // Free must be 0 for monitor to be free after memset(..,0,..).
    New,
    Old
  } AllocationState;
  AllocationState _allocation_state;

 public:
  static void Initialize();

  // Only perform a PerfData operation if the PerfData object has been
  // allocated and if the PerfDataManager has not freed the PerfData
  // objects which can happen at normal VM shutdown.
  //
  #define OM_PERFDATA_OP(f, op_str)              \
    do {                                         \
      if (ObjectMonitor::_sync_ ## f != NULL &&  \
          PerfDataManager::has_PerfData()) {     \
        ObjectMonitor::_sync_ ## f->op_str;      \
      }                                          \
    } while (0)

  // Performance counters shared by all ObjectMonitors; updated via
  // OM_PERFDATA_OP above.
  static PerfCounter * _sync_ContendedLockAttempts;
  static PerfCounter * _sync_FutileWakeups;
  static PerfCounter * _sync_Parks;
  static PerfCounter * _sync_Notifications;
  static PerfCounter * _sync_Inflations;
  static PerfCounter * _sync_Deflations;
  static PerfLongVariable * _sync_MonExtant;

  static int Knob_SpinLimit;

  void* operator new (size_t size) throw();
  void* operator new[] (size_t size) throw();
  void operator delete(void* p);
  void operator delete[] (void *p);

  // TODO-FIXME: the "offset" routines should return a type of off_t instead of int ...
  // ByteSize would also be an appropriate type.
  static int header_offset_in_bytes()     { return offset_of(ObjectMonitor, _header); }
  static int object_offset_in_bytes()     { return offset_of(ObjectMonitor, _object); }
  static int owner_offset_in_bytes()      { return offset_of(ObjectMonitor, _owner); }
  static int recursions_offset_in_bytes() { return offset_of(ObjectMonitor, _recursions); }
  static int cxq_offset_in_bytes()        { return offset_of(ObjectMonitor, _cxq); }
  static int succ_offset_in_bytes()       { return offset_of(ObjectMonitor, _succ); }
  static int EntryList_offset_in_bytes()  { return offset_of(ObjectMonitor, _EntryList); }

  // ObjectMonitor references can be ORed with markOopDesc::monitor_value
  // as part of the ObjectMonitor tagging mechanism. When we combine an
  // ObjectMonitor reference with an offset, we need to remove the tag
  // value in order to generate the proper address.
  //
  // We can either adjust the ObjectMonitor reference and then add the
  // offset or we can adjust the offset that is added to the ObjectMonitor
  // reference. The latter avoids an AGI (Address Generation Interlock)
  // stall so the helper macro adjusts the offset value that is returned
  // to the ObjectMonitor reference manipulation code:
  //
  #define OM_OFFSET_NO_MONITOR_VALUE_TAG(f) \
    ((ObjectMonitor::f ## _offset_in_bytes()) - markOopDesc::monitor_value)

  markOop header() const;
  volatile markOop* header_addr();
  void set_header(markOop hdr);

  // Returns nonzero iff the monitor is in use for a locking operation:
  // a nonzero value in any of _contentions, _waiters, _owner, _cxq or
  // _EntryList makes the monitor "busy" and hence ineligible for deflation.
  intptr_t is_busy() const {
    // TODO-FIXME: assert _owner == null implies _recursions = 0
    // We do not include _ref_count in the is_busy() check because
    // _ref_count is for indicating that the ObjectMonitor* is in
    // use which is orthogonal to whether the ObjectMonitor itself
    // is in use for a locking operation.
    return _contentions|_waiters|intptr_t(_owner)|intptr_t(_cxq)|intptr_t(_EntryList);
  }

  // Version of is_busy() that accounts for special values in
  // _contentions and _owner when AsyncDeflateIdleMonitors is enabled:
  // a negative _contentions and an _owner of DEFLATER_MARKER are both
  // used by the async deflation protocol and do not count as "busy".
  intptr_t is_busy_async() const {
    intptr_t ret_code = _waiters | intptr_t(_cxq) | intptr_t(_EntryList);
    if (!AsyncDeflateIdleMonitors) {
      ret_code |= _contentions | intptr_t(_owner);
    } else {
      if (_contentions > 0) {
        ret_code |= _contentions;
      }
      if (_owner != DEFLATER_MARKER) {
        ret_code |= intptr_t(_owner);
      }
    }
    return ret_code;
  }

  intptr_t is_entered(Thread* current) const;

  void* owner() const;  // Returns NULL if DEFLATER_MARKER is observed.
  void set_owner(void* owner);

  jint waiters() const;

  jint contentions() const;
  intptr_t recursions() const { return _recursions; }

  // JVM/TI GetObjectMonitorUsage() needs this:
  ObjectWaiter* first_waiter()               { return _WaitSet; }
  ObjectWaiter* next_waiter(ObjectWaiter* o) { return o->_next; }
  Thread* thread_of_waiter(ObjectWaiter* o)  { return o->_thread; }

 protected:
  // We don't typically expect or want the ctors or dtors to run.
  // Normal ObjectMonitors are type-stable and immortal.
  ObjectMonitor() { ::memset((void *)this, 0, sizeof(*this)); }

  ~ObjectMonitor() {
    // TODO: Add asserts ...
    // _cxq == 0 _succ == NULL _owner == NULL _waiters == 0
    // _contentions == 0 _EntryList == NULL etc
  }

 private:
  // Reset the list/ownership fields so this (necessarily un-busy)
  // monitor can be reused; asserts it is not in use first.
  void Recycle() {
    // TODO: add stronger asserts ...
    // _cxq == 0 _succ == NULL _owner == NULL _waiters == 0
    // _contentions == 0 EntryList == NULL
    // _recursions == 0 _WaitSet == NULL
    assert(((is_busy()|_recursions) == 0), "freeing inuse monitor");
    _succ = NULL;
    _EntryList = NULL;
    _cxq = NULL;
    _WaitSet = NULL;
    _recursions = 0;
  }

 public:

  void* object() const;
  void* object_addr();
  void set_object(void* obj);
  void set_allocation_state(AllocationState s);
  AllocationState allocation_state() const;
  bool is_free() const;
  bool is_active() const;
  bool is_old() const;
  bool is_new() const;
  void dec_ref_count();
  void inc_ref_count();
  jint ref_count() const;

  bool check(TRAPS);  // true if the thread owns the monitor.
  void check_slow(TRAPS);
  void clear();
  void clear_using_JT();

  bool enter(TRAPS);  // Returns false if monitor is being async deflated and caller should retry locking the object.
  void exit(bool not_suspended, TRAPS);
  void wait(jlong millis, bool interruptable, TRAPS);
  void notify(TRAPS);
  void notifyAll(TRAPS);

  // Use the following at your own risk
  intptr_t complete_exit(TRAPS);
  bool reenter(intptr_t recursions, TRAPS);  // Returns false if monitor is being async deflated and caller should retry locking the object.

 private:
  // Internal helpers for the enter/exit/wait protocols; defined in
  // objectMonitor.cpp.
  void AddWaiter(ObjectWaiter * waiter);
  void INotify(Thread * Self);
  ObjectWaiter * DequeueWaiter();
  void DequeueSpecificWaiter(ObjectWaiter * waiter);
  void EnterI(TRAPS);
  void ReenterI(Thread * Self, ObjectWaiter * SelfNode);
  void UnlinkAfterAcquire(Thread * Self, ObjectWaiter * SelfNode);
  int TryLock(Thread * Self);
  int NotRunnable(Thread * Self, Thread * Owner);
  int TrySpin(Thread * Self);
  void ExitEpilog(Thread * Self, ObjectWaiter * Wakee);
  bool ExitSuspendEquivalent(JavaThread * Self);
  void install_displaced_markword_in_object(const oop obj);
};

// A helper object for managing an ObjectMonitor*'s ref_count. There
// are special safety considerations when async deflation is used.
class ObjectMonitorHandle : public StackObj {
 private:
  ObjectMonitor * _om_ptr;  // the managed monitor; NULL when empty
 public:
  ObjectMonitorHandle() { _om_ptr = NULL; }
  ~ObjectMonitorHandle();

  ObjectMonitor * om_ptr() const { return _om_ptr; }
  // Save the ObjectMonitor* associated with the specified markOop and
  // increment the ref_count.
  bool save_om_ptr(oop object, markOop mark);

  // For internal use by ObjectSynchronizer::monitors_iterate().
  ObjectMonitorHandle(ObjectMonitor * _om_ptr);
  // For internal use by ObjectSynchronizer::inflate().
  void set_om_ptr(ObjectMonitor * om_ptr);
};

// Macro to use guarantee() for more strict AsyncDeflateIdleMonitors
// checks and assert() otherwise.
#define ADIM_guarantee(p, ...)      \
  do {                              \
    if (AsyncDeflateIdleMonitors) { \
      guarantee(p, __VA_ARGS__);    \
    } else {                        \
      assert(p, __VA_ARGS__);       \
    }                               \
  } while (0)

#endif // SHARE_RUNTIME_OBJECTMONITOR_HPP