/*
 * Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_RUNTIME_SYNCHRONIZER_HPP
#define SHARE_RUNTIME_SYNCHRONIZER_HPP

#include "memory/padded.hpp"
#include "oops/markWord.hpp"
#include "runtime/basicLock.hpp"
#include "runtime/handles.hpp"
#include "runtime/perfData.hpp"

class ObjectMonitor;
class ThreadsList;

// ObjectMonitor padded out to DEFAULT_CACHE_LINE_SIZE so adjacent monitors
// in a block do not share a cache line (avoids false sharing between the
// threads that own neighboring monitors).
typedef PaddedEnd<ObjectMonitor, DEFAULT_CACHE_LINE_SIZE> PaddedObjectMonitor;

// Counters accumulated across a single deflation pass; filled in by
// prepare_/deflate_/finish_deflate_idle_monitors() below.
struct DeflateMonitorCounters {
  int n_in_use;              // currently associated with objects
  int n_in_circulation;      // extant
  int n_scavenged;           // reclaimed (global and per-thread)
  int per_thread_scavenged;  // per-thread scavenge total
  double per_thread_times;   // per-thread scavenge times
};

// Static entry points for Java-level object synchronization: monitor
// enter/exit, wait/notify, identity hash code, and the lifecycle
// (allocation, inflation, deflation) of ObjectMonitors.
class ObjectSynchronizer : AllStatic {
  friend class VMStructs;
 public:
  // Result of query_lock_ownership(): who (relative to the querying
  // thread) owns the lock on a given object.
  typedef enum {
    owner_self,
    owner_none,
    owner_other
  } LockOwnership;

  // Why a stack lock was inflated to a heavy-weight ObjectMonitor;
  // used for event reporting/diagnostics (see inflate_cause_name()).
  typedef enum {
    inflate_cause_vm_internal = 0,
    inflate_cause_monitor_enter = 1,
    inflate_cause_wait = 2,
    inflate_cause_notify = 3,
    inflate_cause_hash_code = 4,
    inflate_cause_jni_enter = 5,
    inflate_cause_jni_exit = 6,
    inflate_cause_nof = 7 // Number of causes
  } InflateCause;

  // exit must be implemented non-blocking, since the compiler cannot easily handle
  // deoptimization at monitor exit. Hence, it does not take a Handle argument.

  // This is the "slow path" version of monitor enter and exit.
  static void enter(Handle obj, BasicLock* lock, TRAPS);
  static void exit(oop obj, BasicLock* lock, Thread* THREAD);

  // Used only to handle jni locks or other unmatched monitor enter/exit
  // Internally they will use heavy weight monitor.
  static void jni_enter(Handle obj, TRAPS);
  static void jni_exit(oop obj, Thread* THREAD);

  // Handle all interpreter, compiler and jni cases
  static int  wait(Handle obj, jlong millis, TRAPS);
  static void notify(Handle obj, TRAPS);
  static void notifyall(Handle obj, TRAPS);

  // Fast-path (no safepoint/Handle) variants; return true if the
  // operation could be completed without falling back to the slow path.
  static bool quick_notify(oopDesc* obj, Thread* self, bool All);
  static bool quick_enter(oop obj, Thread* self, BasicLock* Lock);

  // Special internal-use-only method for use by JVM infrastructure
  // that needs to wait() on a java-level object but that can't risk
  // throwing unexpected InterruptedExceptions.
  static void wait_uninterruptibly(Handle obj, jlong Millis, Thread* THREAD);

  // used by classloading to free classloader object lock,
  // wait on an internal lock, and reclaim original lock
  // with original recursion count
  static intptr_t complete_exit(Handle obj, TRAPS);
  static void reenter (Handle obj, intptr_t recursion, TRAPS);

  // thread-specific and global ObjectMonitor free list accessors
  static ObjectMonitor* om_alloc(Thread* self);
  static void om_release(Thread* self, ObjectMonitor* m,
                         bool FromPerThreadAlloc);
  static void om_flush(Thread* self);

  // Inflate light weight monitor to heavy weight monitor
  static ObjectMonitor* inflate(Thread* self, oop obj, const InflateCause cause);
  // This version is only for internal use
  static void inflate_helper(oop obj);
  // Human-readable name of an InflateCause, for logging/events.
  static const char* inflate_cause_name(const InflateCause cause);

  // Returns the identity hash value for an oop
  // NOTE: It may cause monitor inflation
  static intptr_t identity_hash_value_for(Handle obj);
  static intptr_t FastHashCode(Thread* self, oop obj);

  // java.lang.Thread support
  static bool current_thread_holds_lock(JavaThread* thread, Handle h_obj);
  static LockOwnership query_lock_ownership(JavaThread* self, Handle h_obj);

  static JavaThread* get_lock_owner(ThreadsList * t_list, Handle h_obj);

  // JNI detach support
  static void release_monitors_owned_by_thread(TRAPS);
  static void monitors_iterate(MonitorClosure* m);

  // GC: we currently use an aggressive monitor deflation policy
  // Basically we deflate all monitors that are not busy.
  // An adaptive profile-based deflation policy could be used if needed
  static void deflate_idle_monitors(DeflateMonitorCounters* counters);
  static void deflate_thread_local_monitors(Thread* thread, DeflateMonitorCounters* counters);
  static void prepare_deflate_idle_monitors(DeflateMonitorCounters* counters);
  static void finish_deflate_idle_monitors(DeflateMonitorCounters* counters);

  // For a given monitor list: global or per-thread, deflate idle monitors
  static int deflate_monitor_list(ObjectMonitor** list_p,
                                  ObjectMonitor** free_head_p,
                                  ObjectMonitor** free_tail_p);
  static bool deflate_monitor(ObjectMonitor* mid, oop obj,
                              ObjectMonitor** free_head_p,
                              ObjectMonitor** free_tail_p);
  static bool is_cleanup_needed();
  static void oops_do(OopClosure* f);
  // Process oops in thread local used monitors
  static void thread_local_used_oops_do(Thread* thread, OopClosure* f);

  // debugging: consistency checks over the global and per-thread
  // monitor lists; each chk_* routine bumps *error_cnt_p on failure.
  static void audit_and_print_stats(bool on_exit);
  static void chk_free_entry(JavaThread* jt, ObjectMonitor* n,
                             outputStream * out, int *error_cnt_p);
  static void chk_global_free_list_and_count(outputStream * out,
                                             int *error_cnt_p);
  static void chk_global_in_use_list_and_count(outputStream * out,
                                               int *error_cnt_p);
  static void chk_in_use_entry(JavaThread* jt, ObjectMonitor* n,
                               outputStream * out, int *error_cnt_p);
  static void chk_per_thread_in_use_list_and_count(JavaThread *jt,
                                                   outputStream * out,
                                                   int *error_cnt_p);
  static void chk_per_thread_free_list_and_count(JavaThread *jt,
                                                 outputStream * out,
                                                 int *error_cnt_p);
  static void log_in_use_monitor_details(outputStream * out, bool on_exit);
  static int  log_monitor_list_counts(outputStream * out);
  static int  verify_objmon_isinpool(ObjectMonitor *addr) PRODUCT_RETURN0;

 private:
  friend class SynchronizerTest;

  // Monitors are allocated in blocks of _BLOCKSIZE at a time.
  enum { _BLOCKSIZE = 128 };
  // global list of blocks of monitors
  static PaddedObjectMonitor* volatile g_block_list;
  // global monitor free list
  static ObjectMonitor* volatile g_free_list;
  // global monitor in-use list, for moribund threads,
  // monitors they inflated need to be scanned for deflation
  static ObjectMonitor* volatile g_om_in_use_list;
  // count of entries in g_om_in_use_list
  static int g_om_in_use_count;

  // Process oops in all global used monitors (i.e. moribund thread's monitors)
  static void global_used_oops_do(OopClosure* f);
  // Process oops in monitors on the given list
  static void list_oops_do(ObjectMonitor* list, OopClosure* f);

  // Support for SynchronizerTest access to GVars fields:
  static u_char* get_gvars_addr();
  static u_char* get_gvars_hc_sequence_addr();
  static size_t get_gvars_size();
  static u_char* get_gvars_stw_random_addr();
};

// ObjectLocker enforces balanced locking and can never throw an
// IllegalMonitorStateException. However, a pending exception may
// have to pass through, and we must also be able to deal with
// asynchronous exceptions. The caller is responsible for checking
// the thread's pending exception if needed.
// RAII helper: the constructor enters the monitor for obj (when do_lock
// is true) and the destructor exits it, guaranteeing balanced locking
// even when an exception passes through the scope.
class ObjectLocker : public StackObj {
 private:
  Thread*   _thread;   // thread on whose behalf the lock is held
  Handle    _obj;      // the object being locked
  BasicLock _lock;     // stack-allocated lock word used for the fast path
  bool      _dolock;   // default true; false makes this a no-op wrapper
 public:
  ObjectLocker(Handle obj, Thread* thread, bool do_lock = true);
  ~ObjectLocker();

  // Monitor behavior
  void wait(TRAPS) { ObjectSynchronizer::wait(_obj, 0, CHECK); } // wait forever
  void notify_all(TRAPS) { ObjectSynchronizer::notifyall(_obj, CHECK); }
  void wait_uninterruptibly(TRAPS) { ObjectSynchronizer::wait_uninterruptibly(_obj, 0, CHECK); }
  // complete_exit gives up lock completely, returning recursion count
  // reenter reclaims lock with original recursion count
  intptr_t complete_exit(TRAPS) { return ObjectSynchronizer::complete_exit(_obj, THREAD); }
  void reenter(intptr_t recursion, TRAPS) { ObjectSynchronizer::reenter(_obj, recursion, CHECK); }
};

#endif // SHARE_RUNTIME_SYNCHRONIZER_HPP