
src/hotspot/share/runtime/synchronizer.hpp

rev 58110 : v2.09a with 8235795, 8235931 and 8236035 extracted; rebased to jdk-14+28; merge with 8236035.patch.cr1; merge with 8235795.patch.cr1; merge with 8236035.patch.cr2; merge with 8235795.patch.cr2; merge with 8235795.patch.cr3.
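
Old version of the file (baseline):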


  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_RUNTIME_SYNCHRONIZER_HPP
  26 #define SHARE_RUNTIME_SYNCHRONIZER_HPP
  27 
  28 #include "memory/padded.hpp"
  29 #include "oops/markWord.hpp"
  30 #include "runtime/basicLock.hpp"
  31 #include "runtime/handles.hpp"
  32 #include "runtime/perfData.hpp"
  33 
  34 class ObjectMonitor;

  35 class ThreadsList;
  36 
  37 #ifndef OM_CACHE_LINE_SIZE
  38 // Use DEFAULT_CACHE_LINE_SIZE if not already specified for
  39 // the current build platform.
  40 #define OM_CACHE_LINE_SIZE DEFAULT_CACHE_LINE_SIZE
  41 #endif
  42 
  43 typedef PaddedEnd<ObjectMonitor, OM_CACHE_LINE_SIZE> PaddedObjectMonitor;
  44 
  45 struct DeflateMonitorCounters {
  46   int n_in_use;              // currently associated with objects
  47   int n_in_circulation;      // extant
  48   int n_scavenged;           // reclaimed (global and per-thread)
  49   int per_thread_scavenged;  // per-thread scavenge total
  50   double per_thread_times;   // per-thread scavenge times
  51 };
  52 
  53 class ObjectSynchronizer : AllStatic {
  54   friend class VMStructs;
  55  public:
  56   typedef enum {
  57     owner_self,
  58     owner_none,
  59     owner_other
  60   } LockOwnership;
  61 
  62   typedef enum {
  63     inflate_cause_vm_internal = 0,
  64     inflate_cause_monitor_enter = 1,
  65     inflate_cause_wait = 2,
  66     inflate_cause_notify = 3,
  67     inflate_cause_hash_code = 4,
  68     inflate_cause_jni_enter = 5,
  69     inflate_cause_jni_exit = 6,


  91   static bool quick_enter(oop obj, Thread* self, BasicLock* Lock);
  92 
  93   // Special internal-use-only method for use by JVM infrastructure
  94   // that needs to wait() on a java-level object but that can't risk
  95   // throwing unexpected InterruptedExceptions.
  96   static void wait_uninterruptibly(Handle obj, jlong Millis, Thread* THREAD);
  97 
  98   // Used by classloading to free the classloader object lock,
  99   // wait on an internal lock, and reclaim the original lock
 100   // with its original recursion count.
 101   static intx complete_exit(Handle obj, TRAPS);
 102   static void reenter (Handle obj, intx recursions, TRAPS);
 103 
 104   // thread-specific and global ObjectMonitor free list accessors
 105   static ObjectMonitor* om_alloc(Thread* self);
 106   static void om_release(Thread* self, ObjectMonitor* m,
 107                          bool FromPerThreadAlloc);
 108   static void om_flush(Thread* self);
 109 
 110   // Inflate light weight monitor to heavy weight monitor
 111   static ObjectMonitor* inflate(Thread* self, oop obj, const InflateCause cause);

 112   // This version is only for internal use
 113   static void inflate_helper(oop obj);
 114   static const char* inflate_cause_name(const InflateCause cause);
 115 
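For orientation, here is a minimal sketch of how a caller might use this baseline inflate(), which returns the heavyweight monitor directly; the surrounding locals (self, obj, m) are assumptions for illustration and not part of this file:

    // Illustrative caller; assumes a Thread* self and an oop obj are in scope.
    ObjectMonitor* m =
        ObjectSynchronizer::inflate(self, obj,
                                    ObjectSynchronizer::inflate_cause_vm_internal);
    // m now points at the heavyweight monitor associated with obj.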
 116   // Returns the identity hash value for an oop
 117   // NOTE: It may cause monitor inflation
 118   static intptr_t identity_hash_value_for(Handle obj);
 119   static intptr_t FastHashCode(Thread* self, oop obj);
 120 
 121   // java.lang.Thread support
 122   static bool current_thread_holds_lock(JavaThread* thread, Handle h_obj);
 123   static LockOwnership query_lock_ownership(JavaThread* self, Handle h_obj);
 124 
 125   static JavaThread* get_lock_owner(ThreadsList * t_list, Handle h_obj);
 126 
 127   // JNI detach support
 128   static void release_monitors_owned_by_thread(TRAPS);
 129   static void monitors_iterate(MonitorClosure* m);
 130 
 131   // GC: we currently use an aggressive monitor deflation policy.
 132   // Basically we deflate all monitors that are not busy.
 133   // An adaptive profile-based deflation policy could be used if needed
 134   static void deflate_idle_monitors(DeflateMonitorCounters* counters);




 135   static void deflate_thread_local_monitors(Thread* thread, DeflateMonitorCounters* counters);
 136   static void prepare_deflate_idle_monitors(DeflateMonitorCounters* counters);
 137   static void finish_deflate_idle_monitors(DeflateMonitorCounters* counters);
 138 
 139   // For a given monitor list: global or per-thread, deflate idle monitors
 140   static int deflate_monitor_list(ObjectMonitor** list_p,
 141                                   int* count_p,
 142                                   ObjectMonitor** free_head_p,
 143                                   ObjectMonitor** free_tail_p);







 144   static bool deflate_monitor(ObjectMonitor* mid, oop obj,
 145                               ObjectMonitor** free_head_p,
 146                               ObjectMonitor** free_tail_p);
 147   static bool is_cleanup_needed();



 148   static bool needs_monitor_scavenge();






 149   static void oops_do(OopClosure* f);
 150   // Process oops in thread local used monitors
 151   static void thread_local_used_oops_do(Thread* thread, OopClosure* f);
 152 
 153   // debugging
 154   static void audit_and_print_stats(bool on_exit);
 155   static void chk_free_entry(JavaThread* jt, ObjectMonitor* n,
 156                              outputStream * out, int *error_cnt_p);
 157   static void chk_global_free_list_and_count(outputStream * out,
 158                                              int *error_cnt_p);


 159   static void chk_global_in_use_list_and_count(outputStream * out,
 160                                                int *error_cnt_p);
 161   static void chk_in_use_entry(JavaThread* jt, ObjectMonitor* n,
 162                                outputStream * out, int *error_cnt_p);
 163   static void chk_per_thread_in_use_list_and_count(JavaThread *jt,
 164                                                    outputStream * out,
 165                                                    int *error_cnt_p);
 166   static void chk_per_thread_free_list_and_count(JavaThread *jt,
 167                                                  outputStream * out,
 168                                                  int *error_cnt_p);
 169   static void log_in_use_monitor_details(outputStream * out);
 170   static int  log_monitor_list_counts(outputStream * out);
 171   static int  verify_objmon_isinpool(ObjectMonitor *addr) PRODUCT_RETURN0;
 172 


 173  private:
 174   friend class SynchronizerTest;
 175 
 176   enum { _BLOCKSIZE = 128 };
 177   // global list of blocks of monitors
 178   static PaddedObjectMonitor* g_block_list;



 179 
 180   // Function to prepend new blocks to the appropriate lists:
 181   static void prepend_block_to_lists(PaddedObjectMonitor* new_blk);
 182 
 183   // Process oops in all global used monitors (i.e. moribund thread's monitors)
 184   static void global_used_oops_do(OopClosure* f);
 185   // Process oops in monitors on the given list
 186   static void list_oops_do(ObjectMonitor* list, OopClosure* f);
 187 
 188   // Support for SynchronizerTest access to GVars fields:
 189   static u_char* get_gvars_addr();
 190   static u_char* get_gvars_hc_sequence_addr();
 191   static size_t get_gvars_size();
 192   static u_char* get_gvars_stw_random_addr();
 193 };
 194 
 195 // ObjectLocker enforces balanced locking and can never throw an
 196 // IllegalMonitorStateException. However, a pending exception may
 197 // have to pass through, and we must also be able to deal with
 198 // asynchronous exceptions. The caller is responsible for checking
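
New version of the file (rev 58110):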




  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_RUNTIME_SYNCHRONIZER_HPP
  26 #define SHARE_RUNTIME_SYNCHRONIZER_HPP
  27 
  28 #include "memory/padded.hpp"
  29 #include "oops/markWord.hpp"
  30 #include "runtime/basicLock.hpp"
  31 #include "runtime/handles.hpp"
  32 #include "runtime/perfData.hpp"
  33 
  34 class ObjectMonitor;
  35 class ObjectMonitorHandle;
  36 class ThreadsList;
  37 
  38 #ifndef OM_CACHE_LINE_SIZE
  39 // Use DEFAULT_CACHE_LINE_SIZE if not already specified for
  40 // the current build platform.
  41 #define OM_CACHE_LINE_SIZE DEFAULT_CACHE_LINE_SIZE
  42 #endif
  43 
  44 typedef PaddedEnd<ObjectMonitor, OM_CACHE_LINE_SIZE> PaddedObjectMonitor;
  45 
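Since PaddedEnd pads each ObjectMonitor out to OM_CACHE_LINE_SIZE, neighboring monitors in a block should not share a cache line. A purely illustrative compile-time check of that assumption might look like:

    // Illustrative only: a padded monitor occupies a whole number of cache
    // lines, so adjacent PaddedObjectMonitor elements never straddle a line.
    static_assert(sizeof(PaddedObjectMonitor) % OM_CACHE_LINE_SIZE == 0,
                  "PaddedObjectMonitor should fill whole cache lines");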
  46 struct DeflateMonitorCounters {
  47   volatile int n_in_use;              // currently associated with objects
  48   volatile int n_in_circulation;      // extant
  49   volatile int n_scavenged;           // reclaimed (global and per-thread)
  50   volatile int per_thread_scavenged;  // per-thread scavenge total
  51            double per_thread_times;   // per-thread scavenge times
  52 };
  53 
  54 class ObjectSynchronizer : AllStatic {
  55   friend class VMStructs;
  56  public:
  57   typedef enum {
  58     owner_self,
  59     owner_none,
  60     owner_other
  61   } LockOwnership;
  62 
  63   typedef enum {
  64     inflate_cause_vm_internal = 0,
  65     inflate_cause_monitor_enter = 1,
  66     inflate_cause_wait = 2,
  67     inflate_cause_notify = 3,
  68     inflate_cause_hash_code = 4,
  69     inflate_cause_jni_enter = 5,
  70     inflate_cause_jni_exit = 6,


  92   static bool quick_enter(oop obj, Thread* self, BasicLock* Lock);
  93 
  94   // Special internal-use-only method for use by JVM infrastructure
  95   // that needs to wait() on a java-level object but that can't risk
  96   // throwing unexpected InterruptedExceptions.
  97   static void wait_uninterruptibly(Handle obj, jlong Millis, Thread* THREAD);
  98 
  99   // Used by classloading to free the classloader object lock,
 100   // wait on an internal lock, and reclaim the original lock
 101   // with its original recursion count.
 102   static intx complete_exit(Handle obj, TRAPS);
 103   static void reenter (Handle obj, intx recursions, TRAPS);
 104 
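A rough sketch of the release/reclaim pattern these two calls support; everything except complete_exit() and reenter() (the Handle h_obj, the THREAD argument, and the elided wait) is an assumption for illustration:

    // Illustrative only: drop the object lock completely, remembering its
    // recursion count, then re-acquire it later with the same count.
    intx recursions = ObjectSynchronizer::complete_exit(h_obj, THREAD);
    // ... wait on an internal lock, as the comment above describes ...
    ObjectSynchronizer::reenter(h_obj, recursions, THREAD);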
 105   // thread-specific and global ObjectMonitor free list accessors
 106   static ObjectMonitor* om_alloc(Thread* self);
 107   static void om_release(Thread* self, ObjectMonitor* m,
 108                          bool FromPerThreadAlloc);
 109   static void om_flush(Thread* self);
 110 
 111   // Inflate light weight monitor to heavy weight monitor
 112   static void inflate(ObjectMonitorHandle* omh_p, Thread* self, oop obj,
 113                       const InflateCause cause);
 114   // This version is only for internal use
 115   static void inflate_helper(ObjectMonitorHandle* omh_p, oop obj);
 116   static const char* inflate_cause_name(const InflateCause cause);
 117 
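In this revision inflate() no longer returns the monitor; it fills in an ObjectMonitorHandle out-parameter, which appears to keep the inflated monitor from being deflated out from under the caller. A minimal illustrative call (self and obj are assumed to be in scope, and the handle's accessors are not shown in this file, so the sketch stops at the call):

    // Illustrative only: the handle is populated with the monitor for obj.
    ObjectMonitorHandle omh;
    ObjectSynchronizer::inflate(&omh, self, obj,
                                ObjectSynchronizer::inflate_cause_vm_internal);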
 118   // Returns the identity hash value for an oop
 119   // NOTE: It may cause monitor inflation
 120   static intptr_t identity_hash_value_for(Handle obj);
 121   static intptr_t FastHashCode(Thread* self, oop obj);
 122 
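A minimal illustrative call, per the note above that hashing may trigger inflation (self and obj are assumed to be in scope):

    // May cause monitor inflation, as the comment above warns.
    intptr_t hash = ObjectSynchronizer::FastHashCode(self, obj);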
 123   // java.lang.Thread support
 124   static bool current_thread_holds_lock(JavaThread* thread, Handle h_obj);
 125   static LockOwnership query_lock_ownership(JavaThread* self, Handle h_obj);
 126 
 127   static JavaThread* get_lock_owner(ThreadsList * t_list, Handle h_obj);
 128 
 129   // JNI detach support
 130   static void release_monitors_owned_by_thread(TRAPS);
 131   static void monitors_iterate(MonitorClosure* m);
 132 
 133   // GC: we currently use an aggressive monitor deflation policy.
 134   // Basically we deflate all monitors that are not busy.
 135   // An adaptive profile-based deflation policy could be used if needed
 136   static void deflate_idle_monitors(DeflateMonitorCounters* counters);
 137   static void deflate_idle_monitors_using_JT();
 138   static void deflate_global_idle_monitors_using_JT();
 139   static void deflate_per_thread_idle_monitors_using_JT(JavaThread* target);
 140   static void deflate_common_idle_monitors_using_JT(bool is_global, JavaThread* target);
 141   static void deflate_thread_local_monitors(Thread* thread, DeflateMonitorCounters* counters);
 142   static void prepare_deflate_idle_monitors(DeflateMonitorCounters* counters);
 143   static void finish_deflate_idle_monitors(DeflateMonitorCounters* counters);
 144 
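A hedged sketch of how the safepoint-time path might thread a DeflateMonitorCounters through the calls above; the ordering and the per-thread loop are inferred from the names, only the declarations themselves come from this file:

    // Illustrative only: zero the counters, deflate idle monitors on the
    // per-thread and global lists, then publish/log the totals.
    DeflateMonitorCounters counters;
    ObjectSynchronizer::prepare_deflate_idle_monitors(&counters);
    // ... for each JavaThread* jt at the safepoint:
    //       ObjectSynchronizer::deflate_thread_local_monitors(jt, &counters);
    ObjectSynchronizer::deflate_idle_monitors(&counters);
    ObjectSynchronizer::finish_deflate_idle_monitors(&counters);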
 145   // For a given monitor list: global or per-thread, deflate idle monitors
 146   static int deflate_monitor_list(ObjectMonitor** list_p,
 147                                   int* count_p,
 148                                   ObjectMonitor** free_head_p,
 149                                   ObjectMonitor** free_tail_p);
 150   // For a given in-use monitor list: global or per-thread, deflate idle
 151   // monitors using a JavaThread.
 152   static int deflate_monitor_list_using_JT(ObjectMonitor** list_p,
 153                                            int* count_p,
 154                                            ObjectMonitor** free_head_p,
 155                                            ObjectMonitor** free_tail_p,
 156                                            ObjectMonitor** saved_mid_in_use_p);
 157   static bool deflate_monitor(ObjectMonitor* mid, oop obj,
 158                               ObjectMonitor** free_head_p,
 159                               ObjectMonitor** free_tail_p);
 160   static bool deflate_monitor_using_JT(ObjectMonitor* mid,
 161                                        ObjectMonitor** free_head_p,
 162                                        ObjectMonitor** free_tail_p);
 163   static bool is_async_deflation_needed();
 164   static bool needs_monitor_scavenge();
 165   static bool is_safepoint_deflation_needed();
 166   static bool is_async_deflation_requested() { return _is_async_deflation_requested; }
 167   static bool is_special_deflation_requested() { return _is_special_deflation_requested; }
 168   static void set_is_async_deflation_requested(bool new_value) { _is_async_deflation_requested = new_value; }
 169   static void set_is_special_deflation_requested(bool new_value) { _is_special_deflation_requested = new_value; }
 170   static jlong time_since_last_async_deflation_ms();
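A hedged sketch of how these request flags might be used; which thread makes the request and when it is cleared are assumptions, only the accessors and deflate_idle_monitors_using_JT() come from this file:

    // Illustrative only: ask for an async deflation pass when one looks
    // needed, and let the deflating JavaThread service and clear the request.
    if (ObjectSynchronizer::is_async_deflation_needed() &&
        !ObjectSynchronizer::is_async_deflation_requested()) {
      ObjectSynchronizer::set_is_async_deflation_requested(true);
    }
    // ... later, on the deflating JavaThread ...
    if (ObjectSynchronizer::is_async_deflation_requested()) {
      ObjectSynchronizer::deflate_idle_monitors_using_JT();
      ObjectSynchronizer::set_is_async_deflation_requested(false);
    }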
 171   static void oops_do(OopClosure* f);
 172   // Process oops in thread local used monitors
 173   static void thread_local_used_oops_do(Thread* thread, OopClosure* f);
 174 
 175   // debugging
 176   static void audit_and_print_stats(bool on_exit);
 177   static void chk_free_entry(JavaThread* jt, ObjectMonitor* n,
 178                              outputStream * out, int *error_cnt_p);
 179   static void chk_global_free_list_and_count(outputStream * out,
 180                                              int *error_cnt_p);
 181   static void chk_global_wait_list_and_count(outputStream * out,
 182                                              int *error_cnt_p);
 183   static void chk_global_in_use_list_and_count(outputStream * out,
 184                                                int *error_cnt_p);
 185   static void chk_in_use_entry(JavaThread* jt, ObjectMonitor* n,
 186                                outputStream * out, int *error_cnt_p);
 187   static void chk_per_thread_in_use_list_and_count(JavaThread *jt,
 188                                                    outputStream * out,
 189                                                    int *error_cnt_p);
 190   static void chk_per_thread_free_list_and_count(JavaThread *jt,
 191                                                  outputStream * out,
 192                                                  int *error_cnt_p);
 193   static void log_in_use_monitor_details(outputStream * out);
 194   static int  log_monitor_list_counts(outputStream * out);
 195   static int  verify_objmon_isinpool(ObjectMonitor *addr) PRODUCT_RETURN0;
 196 
 197   static void do_safepoint_work(DeflateMonitorCounters* counters);
 198 
 199  private:
 200   friend class SynchronizerTest;
 201 
 202   enum { _BLOCKSIZE = 128 };
 203   // global list of blocks of monitors
 204   static PaddedObjectMonitor* g_block_list;
 205   static volatile bool _is_async_deflation_requested;
 206   static volatile bool _is_special_deflation_requested;
 207   static jlong         _last_async_deflation_time_ns;
 208 
 209   // Function to prepend new blocks to the appropriate lists:
 210   static void prepend_block_to_lists(PaddedObjectMonitor* new_blk);
 211 
 212   // Process oops in all global used monitors (i.e. moribund thread's monitors)
 213   static void global_used_oops_do(OopClosure* f);
 214   // Process oops in monitors on the given list
 215   static void list_oops_do(ObjectMonitor* list, OopClosure* f);
 216 
 217   // Support for SynchronizerTest access to GVars fields:
 218   static u_char* get_gvars_addr();
 219   static u_char* get_gvars_hc_sequence_addr();
 220   static size_t get_gvars_size();
 221   static u_char* get_gvars_stw_random_addr();
 222 };
 223 
 224 // ObjectLocker enforces balanced locking and can never throw an
 225 // IllegalMonitorStateException. However, a pending exception may
 226 // have to pass through, and we must also be able to deal with
 227 // asynchronous exceptions. The caller is responsible for checking

