/*
 * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_RUNTIME_SYNCHRONIZER_HPP
#define SHARE_VM_RUNTIME_SYNCHRONIZER_HPP

#include "oops/markOop.hpp"
#include "runtime/handles.hpp"
#include "runtime/perfData.hpp"
#include "utilities/top.hpp"

// A BasicLock is the stack-allocated part of a lightweight lock: it stores
// the locked object's displaced header (mark) word while the lock is held.
class BasicLock VALUE_OBJ_CLASS_SPEC {
  friend class VMStructs;
 private:
  // The object's saved mark word; volatile because it is read/written by
  // multiple threads during lock inflation and unlocking.
  volatile markOop _displaced_header;
 public:
  markOop displaced_header() const                  { return _displaced_header; }
  void    set_displaced_header(markOop header)      { _displaced_header = header; }

  void print_on(outputStream* st) const;

  // move a basic lock (used during deoptimization)
  void move_to(oop obj, BasicLock* dest);

  // Byte offset of _displaced_header, for use by generated (assembly) code.
  static int displaced_header_offset_in_bytes()     { return offset_of(BasicLock, _displaced_header); }
};

// A BasicObjectLock associates a specific Java object with a BasicLock.
// It is currently embedded in an interpreter frame.
// Because some machines have alignment restrictions on the control stack,
// the actual space allocated by the interpreter may include padding words
// after the end of the BasicObjectLock.  Also, in order to guarantee
// alignment of the embedded BasicLock objects on such machines, we
// put the embedded BasicLock at the beginning of the struct.

class BasicObjectLock VALUE_OBJ_CLASS_SPEC {
  friend class VMStructs;
 private:
  BasicLock _lock;  // the lock, must be double word aligned
  oop       _obj;   // the object that holds the lock

 public:
  // Manipulation
  oop        obj() const        { return _obj; }
  void       set_obj(oop obj)   { _obj = obj; }
  BasicLock* lock()             { return &_lock; }

  // Note: Use frame::interpreter_frame_monitor_size() for the size of BasicObjectLocks
  //       in interpreter activation frames since it includes machine-specific padding.
  static int size()             { return sizeof(BasicObjectLock)/wordSize; }

  // GC support
  void oops_do(OopClosure* f) { f->do_oop(&_obj); }

  // Byte offsets of the fields, for use by generated (assembly) code.
  static int obj_offset_in_bytes()  { return offset_of(BasicObjectLock, _obj); }
  static int lock_offset_in_bytes() { return offset_of(BasicObjectLock, _lock); }
};

class ObjectMonitor;

// ObjectSynchronizer is the static entry point for all Java-level monitor
// operations: enter/exit, wait/notify, identity hash code, and management
// of the global ObjectMonitor free/in-use lists.
class ObjectSynchronizer : AllStatic {
  friend class VMStructs;
 public:
  typedef enum {
    owner_self,   // the querying thread owns the lock
    owner_none,   // the lock is unowned
    owner_other   // some other thread owns the lock
  } LockOwnership;
  // exit must be implemented non-blocking, since the compiler cannot easily handle
  // deoptimization at monitor exit. Hence, it does not take a Handle argument.

  // This is the full version of monitor enter and exit.  I choose not
  // to use enter() and exit() in order to make sure users are aware
  // of the performance and semantics difference.  They are normally
  // used by ObjectLocker etc.  The interpreter and compiler use
  // assembly copies of these routines.  Please keep them synchronized.
  //
  // attempt_rebias flag is used by UseBiasedLocking implementation
  static void fast_enter (Handle obj, BasicLock* lock, bool attempt_rebias, TRAPS);
  static void fast_exit  (oop obj, BasicLock* lock, Thread* THREAD);

  // WARNING: They are ONLY used to handle the slow cases. They should
  // only be used when the fast cases failed. Use of these functions
  // without previous fast case check may cause fatal error.
  static void slow_enter (Handle obj, BasicLock* lock, TRAPS);
  static void slow_exit  (oop obj, BasicLock* lock, Thread* THREAD);

  // Used only to handle jni locks or other unmatched monitor enter/exit
  // Internally they will use heavy weight monitor.
  static void jni_enter    (Handle obj, TRAPS);
  static bool jni_try_enter(Handle obj, Thread* THREAD); // Implements Unsafe.tryMonitorEnter
  static void jni_exit     (oop obj, Thread* THREAD);

  // Handle all interpreter, compiler and jni cases
  static void wait     (Handle obj, jlong millis, TRAPS);
  static void notify   (Handle obj, TRAPS);
  static void notifyall(Handle obj, TRAPS);

  // Special internal-use-only method for use by JVM infrastructure
  // that needs to wait() on a java-level object but that can't risk
  // throwing unexpected InterruptedException.
  static void waitUninterruptibly (Handle obj, jlong Millis, Thread * THREAD) ;

  // used by classloading to free classloader object lock,
  // wait on an internal lock, and reclaim original lock
  // with original recursion count
  static intptr_t complete_exit (Handle obj, TRAPS);
  static void     reenter       (Handle obj, intptr_t recursion, TRAPS);

  // thread-specific and global objectMonitor free list accessors
  // static void verifyInUse (Thread * Self) ; too slow for general assert/debug
  static ObjectMonitor * omAlloc (Thread * Self) ;
  static void omRelease (Thread * Self, ObjectMonitor * m, bool FromPerThreadAlloc) ;
  static void omFlush   (Thread * Self) ;

  // Inflate light weight monitor to heavy weight monitor
  static ObjectMonitor* inflate(Thread * Self, oop obj);
  // This version is only for internal use
  static ObjectMonitor* inflate_helper(oop obj);

  // Returns the identity hash value for an oop
  // NOTE: It may cause monitor inflation
  static intptr_t identity_hash_value_for(Handle obj);
  static intptr_t FastHashCode (Thread * Self, oop obj) ;

  // java.lang.Thread support
  static bool current_thread_holds_lock(JavaThread* thread, Handle h_obj);
  static LockOwnership query_lock_ownership(JavaThread * self, Handle h_obj);

  static JavaThread* get_lock_owner(Handle h_obj, bool doLock);

  // JNI detach support
  static void release_monitors_owned_by_thread(TRAPS);
  static void monitors_iterate(MonitorClosure* m);

  // GC: we currently use an aggressive monitor deflation policy
  // Basically we deflate all monitors that are not busy.
  // An adaptive profile-based deflation policy could be used if needed
  static void deflate_idle_monitors();
  static int  walk_monitor_list(ObjectMonitor** listheadp,
                                ObjectMonitor** FreeHeadp,
                                ObjectMonitor** FreeTailp);
  static bool deflate_monitor(ObjectMonitor* mid, oop obj, ObjectMonitor** FreeHeadp,
                              ObjectMonitor** FreeTailp);
  static void oops_do(OopClosure* f);

  // debugging
  static void trace_locking(Handle obj, bool is_compiled, bool is_method, bool is_locking) PRODUCT_RETURN;
  static void verify() PRODUCT_RETURN;
  static int  verify_objmon_isinpool(ObjectMonitor *addr) PRODUCT_RETURN0;

 private:
  enum { _BLOCKSIZE = 128 };                 // monitors are allocated in blocks of this size
  static ObjectMonitor* gBlockList;          // list of all allocated monitor blocks
  static ObjectMonitor * volatile gFreeList; // global free list of monitors
  static ObjectMonitor * volatile gOmInUseList; // for moribund thread, so monitors they inflated still get scanned
  static int gOmInUseCount;

 public:
  static void Initialize () ;
  // Performance counters for monitoring synchronization behavior.
  static PerfCounter * _sync_ContendedLockAttempts ;
  static PerfCounter * _sync_FutileWakeups ;
  static PerfCounter * _sync_Parks ;
  static PerfCounter * _sync_EmptyNotifications ;
  static PerfCounter * _sync_Notifications ;
  static PerfCounter * _sync_SlowEnter ;
  static PerfCounter * _sync_SlowExit ;
  static PerfCounter * _sync_SlowNotify ;
  static PerfCounter * _sync_SlowNotifyAll ;
  static PerfCounter * _sync_FailedSpins ;
  static PerfCounter * _sync_SuccessfulSpins ;
  static PerfCounter * _sync_PrivateA ;
  static PerfCounter * _sync_PrivateB ;
  static PerfCounter * _sync_MonInCirculation ;
  static PerfCounter * _sync_MonScavenged ;
  static PerfCounter * _sync_Inflations ;
  static PerfCounter * _sync_Deflations ;
  static PerfLongVariable * _sync_MonExtant ;

 public:
  static void RegisterSpinCallback (int (*)(intptr_t, int), intptr_t) ;

};

// ObjectLocker enforces balanced locking and can never throw an
// IllegalMonitorStateException. However, a pending exception may
// have to pass through, and we must also be able to deal with
// asynchronous exceptions. The caller is responsible for checking
// the thread's pending exception if needed.
// doLock was added to support classloading with UnsyncloadClass which
// requires flag based choice of locking the classloader lock.
class ObjectLocker : public StackObj {
 private:
  Thread*   _thread;
  Handle    _obj;
  BasicLock _lock;
  bool      _dolock;   // default true
 public:
  ObjectLocker(Handle obj, Thread* thread, bool doLock = true);
  ~ObjectLocker();

  // Monitor behavior
  void wait      (TRAPS)  { ObjectSynchronizer::wait     (_obj, 0, CHECK); } // wait forever
  void notify_all(TRAPS)  { ObjectSynchronizer::notifyall(_obj,    CHECK); }
  void waitUninterruptibly (TRAPS) { ObjectSynchronizer::waitUninterruptibly (_obj, 0, CHECK); }
  // complete_exit gives up lock completely, returning recursion count
  // reenter reclaims lock with original recursion count
  intptr_t complete_exit(TRAPS) { return ObjectSynchronizer::complete_exit(_obj, CHECK_0); }
  void reenter(intptr_t recursion, TRAPS) { ObjectSynchronizer::reenter(_obj, recursion, CHECK); }
};

#endif // SHARE_VM_RUNTIME_SYNCHRONIZER_HPP