1 /* 2 * Copyright (c) 1998, 2016, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
 *
 */

#ifndef SHARE_VM_RUNTIME_SYNCHRONIZER_HPP
#define SHARE_VM_RUNTIME_SYNCHRONIZER_HPP

#include "oops/markOop.hpp"
#include "runtime/basicLock.hpp"
#include "runtime/handles.hpp"
#include "runtime/perfData.hpp"

class ObjectMonitor;

// Iterator over a list of ObjectMonitors that multiple worker threads may
// drain concurrently: each call to claim() hands out one monitor from the
// shared cursor, so workers can apply an OopClosure to disjoint subsets of
// the list in parallel (see parallel_oops_do).
class ParallelObjectSynchronizerIterator VALUE_OBJ_CLASS_SPEC {
  friend class ObjectSynchronizer;

 private:
  // Shared cursor into the monitor list; volatile because several worker
  // threads advance it concurrently (claiming discipline is in the .cpp —
  // presumably an atomic pop, TODO confirm against the implementation).
  ObjectMonitor* volatile _cur;

 private:
  // Only ObjectSynchronizer (a friend) may construct an iterator, seeded
  // with the head of the monitor list to be walked.
  ParallelObjectSynchronizerIterator(ObjectMonitor* head);
  // Detach and return the next unclaimed monitor, or presumably NULL when
  // the list is exhausted — TODO confirm against the implementation.
  ObjectMonitor* claim();

 public:
  // Apply f to the oops of every monitor this caller manages to claim.
  // Safe to invoke from several threads on the same iterator instance.
  bool parallel_oops_do(OopClosure* f);
};

// ObjectSynchronizer: the VM-internal entry points for Java object
// monitor operations (monitorenter/monitorexit, Object.wait/notify,
// identity hash codes) plus management of the ObjectMonitor free and
// in-use lists. All-static; never instantiated.
class ObjectSynchronizer : AllStatic {
  friend class VMStructs;
  friend class ParallelObjectSynchronizerIterator;
 public:
  // Answer to "does this thread own the lock on that object?" —
  // see query_lock_ownership().
  typedef enum {
    owner_self,    // the querying thread owns the lock
    owner_none,    // the object is unlocked
    owner_other    // some other thread owns the lock
  } LockOwnership;

  // Reason a lightweight (stack) lock was inflated to a heavyweight
  // ObjectMonitor; used for monitoring/diagnostics (see inflate_cause_name).
  typedef enum {
    inflate_cause_vm_internal = 0,
    inflate_cause_monitor_enter = 1,
    inflate_cause_wait = 2,
    inflate_cause_notify = 3,
    inflate_cause_hash_code = 4,
    inflate_cause_jni_enter = 5,
    inflate_cause_jni_exit = 6,
    inflate_cause_nof = 7 // Number of causes
  } InflateCause;

  // exit must be implemented non-blocking, since the compiler cannot easily handle
  // deoptimization at monitor exit. Hence, it does not take a Handle argument.

  // This is the full version of monitor enter and exit. I chose not
  // to use enter() and exit() in order to make sure the user is aware
  // of the performance and semantics difference. They are normally
  // used by ObjectLocker etc. The interpreter and compiler use
  // assembly copies of these routines. Please keep them synchronized.
  //
  // attempt_rebias flag is used by UseBiasedLocking implementation
  static void fast_enter(Handle obj, BasicLock* lock, bool attempt_rebias,
                         TRAPS);
  static void fast_exit(oop obj, BasicLock* lock, Thread* THREAD);

  // WARNING: They are ONLY used to handle the slow cases. They should
  // only be used when the fast cases failed. Use of these functions
  // without previous fast case check may cause fatal error.
  static void slow_enter(Handle obj, BasicLock* lock, TRAPS);
  static void slow_exit(oop obj, BasicLock* lock, Thread* THREAD);

  // Used only to handle jni locks or other unmatched monitor enter/exit
  // Internally they will use heavy weight monitor.
  static void jni_enter(Handle obj, TRAPS);
  static void jni_exit(oop obj, Thread* THREAD);

  // Handle all interpreter, compiler and jni cases
  static int  wait(Handle obj, jlong millis, TRAPS);
  static void notify(Handle obj, TRAPS);
  static void notifyall(Handle obj, TRAPS);

  // Fast paths that avoid inflation where possible; return true when the
  // operation completed without needing the slow path — TODO confirm the
  // exact success contract against the implementation.
  static bool quick_notify(oopDesc* obj, Thread* Self, bool All);
  static bool quick_enter(oop obj, Thread* Self, BasicLock* Lock);

  // Special internal-use-only method for use by JVM infrastructure
  // that needs to wait() on a java-level object but that can't risk
  // throwing unexpected InterruptedExecutionExceptions.
  static void waitUninterruptibly(Handle obj, jlong Millis, Thread * THREAD);

  // used by classloading to free classloader object lock,
  // wait on an internal lock, and reclaim original lock
  // with original recursion count
  static intptr_t complete_exit(Handle obj, TRAPS);
  static void reenter (Handle obj, intptr_t recursion, TRAPS);

  // thread-specific and global objectMonitor free list accessors
  static void verifyInUse(Thread * Self);
  static ObjectMonitor * omAlloc(Thread * Self);
  static void omRelease(Thread * Self, ObjectMonitor * m,
                        bool FromPerThreadAlloc);
  static void omFlush(Thread * Self);

  // Inflate light weight monitor to heavy weight monitor
  static ObjectMonitor* inflate(Thread * Self, oop obj, const InflateCause cause);
  // This version is only for internal use
  static ObjectMonitor* inflate_helper(oop obj);
  // Human-readable name for an InflateCause (diagnostics/monitoring).
  static const char* inflate_cause_name(const InflateCause cause);

  // Returns the identity hash value for an oop
  // NOTE: It may cause monitor inflation
  static intptr_t identity_hash_value_for(Handle obj);
  static intptr_t FastHashCode(Thread * Self, oop obj);

  // java.lang.Thread support
  static bool current_thread_holds_lock(JavaThread* thread, Handle h_obj);
  static LockOwnership query_lock_ownership(JavaThread * self, Handle h_obj);

  static JavaThread* get_lock_owner(Handle h_obj, bool doLock);

  // JNI detach support
  static void release_monitors_owned_by_thread(TRAPS);
  static void monitors_iterate(MonitorClosure* m);

  // GC: we currently use an aggressive monitor deflation policy
  // Basically we deflate all monitors that are not busy.
  // An adaptive profile-based deflation policy could be used if needed
  // When deflate_tl is true, also deflate thread-local monitors. Otherwise only
  // deflate global monitors.
  static void deflate_idle_monitors(bool deflate_tl);
  static void deflate_idle_monitors_and_oops_do(Thread* thread, OopClosure* cl);
  static void deflate_idle_monitors_all_threads();

  // For a given monitor list: global or per-thread, deflate idle monitors
  static int deflate_monitor_list(ObjectMonitor** listheadp,
                                  ObjectMonitor** freeHeadp,
                                  ObjectMonitor** freeTailp,
                                  OopClosure* cl = NULL);
  static bool deflate_monitor(ObjectMonitor* mid, oop obj,
                              ObjectMonitor** freeHeadp,
                              ObjectMonitor** freeTailp);
  static void oops_do(OopClosure* f);
  // Process oops in thread local used monitors
  static void thread_local_used_oops_do(Thread* thread, OopClosure* f);

  // Parallel GC support
  static ParallelObjectSynchronizerIterator parallel_iterator();

  // debugging
  static void sanity_checks(const bool verbose,
                            const unsigned int cache_line_size,
                            int *error_cnt_ptr, int *warning_cnt_ptr);
  static void verify() PRODUCT_RETURN;
  static int  verify_objmon_isinpool(ObjectMonitor *addr) PRODUCT_RETURN0;

 private:
  // Monitors are allocated in blocks of this many (see omAlloc).
  enum { _BLOCKSIZE = 128 };
  // global list of blocks of monitors
  // gBlockList is really PaddedEnd<ObjectMonitor> *, but we don't
  // want to expose the PaddedEnd template more than necessary.
  static ObjectMonitor * volatile gBlockList;
  // global monitor free list
  static ObjectMonitor * volatile gFreeList;
  // global monitor in-use list, for moribund threads,
  // monitors they inflated need to be scanned for deflation
  static ObjectMonitor * volatile gOmInUseList;
  // count of entries in gOmInUseList
  static int gOmInUseCount;

  // Process oops in all monitors
  static void global_oops_do(OopClosure* f);
  // Process oops in all global used monitors (i.e. moribund thread's monitors)
  static void global_used_oops_do(OopClosure* f);
  // Process oops in monitors on the given list
  static void list_oops_do(ObjectMonitor* list, OopClosure* f);

};

// ObjectLocker enforces balanced locking and can never throw an
// IllegalMonitorStateException. However, a pending exception may
// have to pass through, and we must also be able to deal with
// asynchronous exceptions. The caller is responsible for checking
// the thread's pending exception if needed.
// doLock was added to support classloading with UnsyncloadClass which
// requires flag based choice of locking the classloader lock.
// RAII scoped lock on a Java object: the constructor enters the monitor,
// the destructor exits it, guaranteeing balanced lock/unlock even on the
// exceptional path (see the comment preceding this class).
class ObjectLocker : public StackObj {
 private:
  Thread*   _thread;  // thread on whose behalf the object is locked
  Handle    _obj;     // the Java object being locked
  BasicLock _lock;    // stack-allocated lock record used for the fast path
  bool      _dolock;  // default true; when false, no locking is performed
 public:
  // Locks obj for thread unless doLock is false (flag-based choice used
  // by classloading; see UnsyncloadClass note above this class).
  ObjectLocker(Handle obj, Thread* thread, bool doLock = true);
  // Releases the lock taken by the constructor (if any).
  ~ObjectLocker();

  // Monitor behavior
  void wait(TRAPS)      { ObjectSynchronizer::wait(_obj, 0, CHECK); } // wait forever
  void notify_all(TRAPS) { ObjectSynchronizer::notifyall(_obj, CHECK); }
  // Like wait(), but cannot be interrupted; for VM-internal use where an
  // unexpected InterruptedException would be unsafe.
  void waitUninterruptibly(TRAPS) { ObjectSynchronizer::waitUninterruptibly(_obj, 0, CHECK); }
  // complete_exit gives up lock completely, returning recursion count
  // reenter reclaims lock with original recursion count
  intptr_t complete_exit(TRAPS)  { return ObjectSynchronizer::complete_exit(_obj, THREAD); }
  void reenter(intptr_t recursion, TRAPS) { ObjectSynchronizer::reenter(_obj, recursion, CHECK); }
};

#endif // SHARE_VM_RUNTIME_SYNCHRONIZER_HPP