1 /* 2 * Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
 */

#ifndef SHARE_RUNTIME_SYNCHRONIZER_HPP
#define SHARE_RUNTIME_SYNCHRONIZER_HPP

#include "memory/padded.hpp"
#include "oops/markOop.hpp"
#include "runtime/basicLock.hpp"
#include "runtime/handles.hpp"
#include "runtime/perfData.hpp"

class ObjectMonitor;
class ObjectMonitorHandle;
class ThreadsList;

// Statistics gathered during a single monitor-deflation pass.
// Filled in by the deflation code and consumed by logging.
struct DeflateMonitorCounters {
  int nInuse;              // currently associated with objects
  int nInCirculation;      // extant
  int nScavenged;          // reclaimed (global and per-thread)
  int perThreadScavenged;  // per-thread scavenge total
  double perThreadTimes;   // per-thread scavenge times
};

// ObjectSynchronizer is the VM-side implementation of Java object
// monitors: lock enter/exit (fast and slow paths), Object.wait/notify,
// identity hash code installation, inflation of stack locks into
// heavyweight ObjectMonitors, and deflation of idle monitors.
// All state and entry points are static (AllStatic).
class ObjectSynchronizer : AllStatic {
  friend class VMStructs;
 public:
  // Result of query_lock_ownership(): who owns the monitor relative
  // to the querying thread.
  typedef enum {
    owner_self,
    owner_none,
    owner_other
  } LockOwnership;

  // Reason a monitor was inflated; used for diagnostics/statistics.
  typedef enum {
    inflate_cause_vm_internal = 0,
    inflate_cause_monitor_enter = 1,
    inflate_cause_wait = 2,
    inflate_cause_notify = 3,
    inflate_cause_hash_code = 4,
    inflate_cause_jni_enter = 5,
    inflate_cause_jni_exit = 6,
    inflate_cause_nof = 7 // Number of causes
  } InflateCause;

  // exit must be implemented non-blocking, since the compiler cannot easily handle
  // deoptimization at monitor exit. Hence, it does not take a Handle argument.

  // This is the full version of monitor enter and exit. We choose not
  // to use enter() and exit() in order to make sure users are aware
  // of the performance and semantics difference. They are normally
  // used by ObjectLocker etc. The interpreter and compiler use
  // assembly copies of these routines. Please keep them synchronized.
  //
  // attempt_rebias flag is used by UseBiasedLocking implementation
  static void fast_enter(Handle obj, BasicLock* lock, bool attempt_rebias,
                         TRAPS);
  static void fast_exit(oop obj, BasicLock* lock, Thread* THREAD);

  // WARNING: They are ONLY used to handle the slow cases. They should
  // only be used when the fast cases failed. Use of these functions
  // without previous fast case check may cause fatal error.
  static void slow_enter(Handle obj, BasicLock* lock, TRAPS);
  static void slow_exit(oop obj, BasicLock* lock, Thread* THREAD);

  // Used only to handle jni locks or other unmatched monitor enter/exit
  // Internally they will use heavy weight monitor.
  static void jni_enter(Handle obj, TRAPS);
  static void jni_exit(oop obj, Thread* THREAD);

  // Handle all interpreter, compiler and jni cases
  static int  wait(Handle obj, jlong millis, TRAPS);
  static void notify(Handle obj, TRAPS);
  static void notifyall(Handle obj, TRAPS);

  // Fast-path variants that avoid inflation when possible; return true
  // when the operation was completed without falling back to the slow path.
  static bool quick_notify(oopDesc* obj, Thread* Self, bool All);
  static bool quick_enter(oop obj, Thread* Self, BasicLock* Lock);

  // Special internal-use-only method for use by JVM infrastructure
  // that needs to wait() on a java-level object but that can't risk
  // throwing unexpected InterruptedException (note: the original
  // comment said "InterruptedExecutionExceptions", a typo).
  static void waitUninterruptibly(Handle obj, jlong Millis, Thread * THREAD);

  // used by classloading to free classloader object lock,
  // wait on an internal lock, and reclaim original lock
  // with original recursion count
  static intptr_t complete_exit(Handle obj, TRAPS);
  static void reenter (Handle obj, intptr_t recursion, TRAPS);

  // thread-specific and global objectMonitor free list accessors
  static ObjectMonitor * omAlloc(Thread * Self, const InflateCause cause);
  static void omRelease(Thread * Self, ObjectMonitor * m,
                        bool FromPerThreadAlloc);
  static void omFlush(Thread * Self);

  // Inflate light weight monitor to heavy weight monitor
  static void inflate(ObjectMonitorHandle * omh_p, Thread * Self, oop obj,
                      const InflateCause cause);
  // This version is only for internal use
  static void inflate_helper(ObjectMonitorHandle * omh_p, oop obj);
  // Human-readable name for an InflateCause (diagnostics/logging).
  static const char* inflate_cause_name(const InflateCause cause);

  // Returns the identity hash value for an oop
  // NOTE: It may cause monitor inflation
  static intptr_t identity_hash_value_for(Handle obj);
  static intptr_t FastHashCode(Thread * Self, oop obj);

  // java.lang.Thread support
  static bool current_thread_holds_lock(JavaThread* thread, Handle h_obj);
  static LockOwnership query_lock_ownership(JavaThread * self, Handle h_obj);

  static JavaThread* get_lock_owner(ThreadsList * t_list, Handle h_obj);

  // JNI detach support
  static void release_monitors_owned_by_thread(TRAPS);
  static void monitors_iterate(MonitorClosure* m);

  // GC: we currently use an aggressive monitor deflation policy
  // Basically we deflate all monitors that are not busy.
  // An adaptive profile-based deflation policy could be used if needed
  static void deflate_idle_monitors(DeflateMonitorCounters* counters);
  static void deflate_global_idle_monitors_using_JT();
  static void deflate_per_thread_idle_monitors_using_JT();
  static void deflate_common_idle_monitors_using_JT(bool is_global, JavaThread * self);
  static void deflate_thread_local_monitors(Thread* thread, DeflateMonitorCounters* counters);
  static void prepare_deflate_idle_monitors(DeflateMonitorCounters* counters);
  static void finish_deflate_idle_monitors(DeflateMonitorCounters* counters);

  // For a given monitor list: global or per-thread, deflate idle monitors
  static int deflate_monitor_list(ObjectMonitor** listheadp,
                                  ObjectMonitor** freeHeadp,
                                  ObjectMonitor** freeTailp);
  // For a given in-use monitor list: global or per-thread, deflate idle
  // monitors using a JavaThread.
  static int deflate_monitor_list_using_JT(ObjectMonitor** listHeadp,
                                           ObjectMonitor** freeHeadp,
                                           ObjectMonitor** freeTailp,
                                           ObjectMonitor** savedMidInUsep);
  static bool deflate_monitor(ObjectMonitor* mid, oop obj,
                              ObjectMonitor** freeHeadp,
                              ObjectMonitor** freeTailp);
  static bool deflate_monitor_using_JT(ObjectMonitor* mid,
                                       ObjectMonitor** freeHeadp,
                                       ObjectMonitor** freeTailp);
  static bool is_async_deflation_needed();
  static bool is_safepoint_deflation_needed();
  // Simple flag accessors; the flags themselves are volatile (declared below)
  // and are written by requesters / cleared by the deflation code.
  static bool is_async_deflation_requested() { return _is_async_deflation_requested; }
  static bool is_special_deflation_requested() { return _is_special_deflation_requested; }
  static void set_is_async_deflation_requested(bool new_value) { _is_async_deflation_requested = new_value; }
  static void set_is_special_deflation_requested(bool new_value) { _is_special_deflation_requested = new_value; }
  static jlong time_since_last_async_deflation_ms();
  static void oops_do(OopClosure* f);
  // Process oops in thread local used monitors
  static void thread_local_used_oops_do(Thread* thread, OopClosure* f);

  // debugging
  static void audit_and_print_stats(bool on_exit);
  static void chk_free_entry(JavaThread * jt, ObjectMonitor * n,
                             outputStream * out, int *error_cnt_p);
  static void chk_global_free_list_and_count(outputStream * out,
                                             int *error_cnt_p);
  static void chk_global_in_use_list_and_count(outputStream * out,
                                               int *error_cnt_p);
  static void chk_in_use_entry(JavaThread * jt, ObjectMonitor * n,
                               outputStream * out, int *error_cnt_p);
  static void chk_per_thread_in_use_list_and_count(JavaThread *jt,
                                                   outputStream * out,
                                                   int *error_cnt_p);
  static void chk_per_thread_free_list_and_count(JavaThread *jt,
                                                 outputStream * out,
                                                 int *error_cnt_p);
  static void log_in_use_monitor_details(outputStream * out, bool on_exit);
  static int  log_monitor_list_counts(outputStream * out);
  // No-op (returning 0) in PRODUCT builds.
  static int  verify_objmon_isinpool(ObjectMonitor *addr) PRODUCT_RETURN0;

  static void do_safepoint_work(DeflateMonitorCounters* _counters);

 private:
  friend class SynchronizerTest;

  // Number of ObjectMonitors allocated per block on the global block list.
  enum { _BLOCKSIZE = 128 };
  // global list of blocks of monitors
  static PaddedEnd<ObjectMonitor> * volatile gBlockList;
  // global monitor free list
  static ObjectMonitor * volatile gFreeList;
  // global monitor in-use list, for moribund threads,
  // monitors they inflated need to be scanned for deflation
  static ObjectMonitor * volatile gOmInUseList;
  // count of entries in gOmInUseList
  static int gOmInUseCount;
  static volatile bool _is_async_deflation_requested;
  static volatile bool _is_special_deflation_requested;
  static jlong         _last_async_deflation_time_ns;

  // Process oops in all global used monitors (i.e. moribund thread's monitors)
  static void global_used_oops_do(OopClosure* f);
  // Process oops in monitors on the given list
  static void list_oops_do(ObjectMonitor* list, OopClosure* f);

  // Support for SynchronizerTest access to GVars fields:
  static u_char* get_gvars_addr();
  static u_char* get_gvars_hcSequence_addr();
  static size_t get_gvars_size();
  static u_char* get_gvars_stwRandom_addr();
};

// ObjectLocker enforces balanced locking and can never throw an
// IllegalMonitorStateException. However, a pending exception may
// have to pass through, and we must also be able to deal with
// asynchronous exceptions. The caller is responsible for checking
// the thread's pending exception if needed.
// RAII lock holder: the constructor acquires the monitor for 'obj'
// (unless doLock is false) and the destructor releases it, guaranteeing
// balanced enter/exit even when exceptions propagate through the scope.
class ObjectLocker : public StackObj {
 private:
  Thread*   _thread;  // thread that acquired the lock
  Handle    _obj;     // object being locked
  BasicLock _lock;    // on-stack BasicLock used for the fast path
  bool      _dolock;  // default true; false makes this a no-op wrapper
 public:
  ObjectLocker(Handle obj, Thread* thread, bool doLock = true);
  ~ObjectLocker();

  // Monitor behavior
  void wait(TRAPS) { ObjectSynchronizer::wait(_obj, 0, CHECK); } // wait forever
  void notify_all(TRAPS) { ObjectSynchronizer::notifyall(_obj, CHECK); }
  void waitUninterruptibly(TRAPS) { ObjectSynchronizer::waitUninterruptibly(_obj, 0, CHECK); }
  // complete_exit gives up lock completely, returning recursion count
  // reenter reclaims lock with original recursion count
  intptr_t complete_exit(TRAPS) { return ObjectSynchronizer::complete_exit(_obj, THREAD); }
  void reenter(intptr_t recursion, TRAPS) { ObjectSynchronizer::reenter(_obj, recursion, CHECK); }
};

#endif // SHARE_RUNTIME_SYNCHRONIZER_HPP