1 /*
   2  * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_VM_RUNTIME_SYNCHRONIZER_HPP
  26 #define SHARE_VM_RUNTIME_SYNCHRONIZER_HPP
  27 
  28 #include "oops/markOop.hpp"
  29 #include "runtime/basicLock.hpp"
  30 #include "runtime/handles.hpp"
  31 #include "runtime/perfData.hpp"
  32 
  33 class ObjectMonitor;
  34 
// Counters accumulated while scanning the monitor population during a
// deflation pass (filled in by ObjectSynchronizer::deflate_idle_monitors()
// and friends below).
struct DeflateMonitorCounters {
  int nInuse;          // currently associated with objects
  int nInCirculation;  // extant
  int nScavenged;      // reclaimed
};
  40 
// Static entry points for Java object monitor operations: enter/exit,
// wait/notify, identity hash code, and management of the global and
// per-thread ObjectMonitor lists (allocation, inflation, deflation).
class ObjectSynchronizer : AllStatic {
  friend class VMStructs;
 public:
  // Answer returned by query_lock_ownership() below.
  typedef enum {
    owner_self,
    owner_none,
    owner_other
  } LockOwnership;

  // Reason a stack lock was inflated to a heavy weight monitor;
  // passed to inflate() and rendered by inflate_cause_name().
  typedef enum {
    inflate_cause_vm_internal = 0,
    inflate_cause_monitor_enter = 1,
    inflate_cause_wait = 2,
    inflate_cause_notify = 3,
    inflate_cause_hash_code = 4,
    inflate_cause_jni_enter = 5,
    inflate_cause_jni_exit = 6,
    inflate_cause_nof = 7 // Number of causes
  } InflateCause;

  // exit must be implemented non-blocking, since the compiler cannot easily handle
  // deoptimization at monitor exit. Hence, it does not take a Handle argument.

  // This is the full version of monitor enter and exit. We choose not
  // to use enter() and exit() in order to make sure users are aware
  // of the performance and semantics difference. They are normally
  // used by ObjectLocker etc. The interpreter and compiler use
  // assembly copies of these routines. Please keep them synchronized.
  //
  // attempt_rebias flag is used by UseBiasedLocking implementation
  static void fast_enter(Handle obj, BasicLock* lock, bool attempt_rebias,
                         TRAPS);
  static void fast_exit(oop obj, BasicLock* lock, Thread* THREAD);

  // WARNING: These are ONLY used to handle the slow cases. They should
  // only be used when the fast cases have failed. Use of these functions
  // without a previous fast case check may cause a fatal error.
  static void slow_enter(Handle obj, BasicLock* lock, TRAPS);
  static void slow_exit(oop obj, BasicLock* lock, Thread* THREAD);

  // Used only to handle jni locks or other unmatched monitor enter/exit
  // Internally they will use heavy weight monitor.
  static void jni_enter(Handle obj, TRAPS);
  static void jni_exit(oop obj, Thread* THREAD);

  // Handle all interpreter, compiler and jni cases
  static int  wait(Handle obj, jlong millis, TRAPS);
  static void notify(Handle obj, TRAPS);
  static void notifyall(Handle obj, TRAPS);

  // Fast-path attempts that bypass the full enter/notify machinery.
  // NOTE(review): presumably return false when the caller must fall
  // back to the slow path — confirm against synchronizer.cpp.
  static bool quick_notify(oopDesc* obj, Thread* Self, bool All);
  static bool quick_enter(oop obj, Thread* Self, BasicLock* Lock);

  // Special internal-use-only method for use by JVM infrastructure
  // that needs to wait() on a java-level object but that can't risk
  // throwing unexpected InterruptedException.
  static void waitUninterruptibly(Handle obj, jlong Millis, Thread * THREAD);

  // used by classloading to free classloader object lock,
  // wait on an internal lock, and reclaim original lock
  // with original recursion count
  static intptr_t complete_exit(Handle obj, TRAPS);
  static void reenter (Handle obj, intptr_t recursion, TRAPS);

  // thread-specific and global objectMonitor free list accessors
  static void verifyInUse(Thread * Self);
  static ObjectMonitor * omAlloc(Thread * Self);
  static void omRelease(Thread * Self, ObjectMonitor * m,
                        bool FromPerThreadAlloc);
  static void omFlush(Thread * Self);

  // Inflate light weight monitor to heavy weight monitor
  static ObjectMonitor* inflate(Thread * Self, oop obj, const InflateCause cause);
  // This version is only for internal use
  static ObjectMonitor* inflate_helper(oop obj);
  // Human-readable name for an InflateCause, e.g. for event reporting.
  static const char* inflate_cause_name(const InflateCause cause);

  // Returns the identity hash value for an oop
  // NOTE: It may cause monitor inflation
  static intptr_t identity_hash_value_for(Handle obj);
  static intptr_t FastHashCode(Thread * Self, oop obj);

  // java.lang.Thread support
  static bool current_thread_holds_lock(JavaThread* thread, Handle h_obj);
  static LockOwnership query_lock_ownership(JavaThread * self, Handle h_obj);

  static JavaThread* get_lock_owner(Handle h_obj, bool doLock);

  // JNI detach support
  static void release_monitors_owned_by_thread(TRAPS);
  static void monitors_iterate(MonitorClosure* m);

  // GC: we currently use an aggressive monitor deflation policy.
  // Basically we deflate all monitors that are not busy.
  // An adaptive profile-based deflation policy could be used if needed
  static void deflate_idle_monitors(DeflateMonitorCounters* counters);
  static void deflate_thread_local_monitors(Thread* thread, DeflateMonitorCounters* counters);
  static void prepare_deflate_idle_monitors(DeflateMonitorCounters* counters);
  static void finish_deflate_idle_monitors(DeflateMonitorCounters* counters);

  // For a given monitor list: global or per-thread, deflate idle monitors
  static int deflate_monitor_list(ObjectMonitor** listheadp,
                                  ObjectMonitor** freeHeadp,
                                  ObjectMonitor** freeTailp);
  static bool deflate_monitor(ObjectMonitor* mid, oop obj,
                              ObjectMonitor** freeHeadp,
                              ObjectMonitor** freeTailp);
  static bool is_cleanup_needed();
  static void oops_do(OopClosure* f);
  // Process oops in thread local used monitors
  static void thread_local_used_oops_do(Thread* thread, OopClosure* f);

  // debugging
  static void sanity_checks(const bool verbose,
                            const unsigned int cache_line_size,
                            int *error_cnt_ptr, int *warning_cnt_ptr);
  static int  verify_objmon_isinpool(ObjectMonitor *addr) PRODUCT_RETURN0;

 private:
  enum { _BLOCKSIZE = 128 };
  // global list of blocks of monitors
  // gBlockList is really PaddedEnd<ObjectMonitor> *, but we don't
  // want to expose the PaddedEnd template more than necessary.
  static ObjectMonitor * volatile gBlockList;
  // global monitor free list
  static ObjectMonitor * volatile gFreeList;
  // global monitor in-use list, for moribund threads,
  // monitors they inflated need to be scanned for deflation
  static ObjectMonitor * volatile gOmInUseList;
  // count of entries in gOmInUseList
  static int gOmInUseCount;

  // Process oops in all monitors
  static void global_oops_do(OopClosure* f);
  // Process oops in all global used monitors (i.e. moribund thread's monitors)
  static void global_used_oops_do(OopClosure* f);
  // Process oops in monitors on the given list
  static void list_oops_do(ObjectMonitor* list, OopClosure* f);

};
 181 
// ObjectLocker enforces balanced locking and can never throw an
// IllegalMonitorStateException. However, a pending exception may
// have to pass through, and we must also be able to deal with
// asynchronous exceptions. The caller is responsible for checking
// the thread's pending exception if needed.
// doLock was added to support classloading with UnsyncloadClass which
// requires a flag-based choice of locking the classloader lock.
// Scoped (stack-allocated) lock on a Java object; ctor/dtor are defined
// out of line and — per the _dolock flag — presumably acquire the lock on
// construction and release it on destruction (confirm in synchronizer.cpp).
class ObjectLocker : public StackObj {
 private:
  Thread*   _thread;   // thread performing the locking
  Handle    _obj;      // object being locked
  BasicLock _lock;     // on-stack lock record for this locking scope
  bool      _dolock;   // default true
 public:
  ObjectLocker(Handle obj, Thread* thread, bool doLock = true);
  ~ObjectLocker();

  // Monitor behavior
  void wait(TRAPS)  { ObjectSynchronizer::wait(_obj, 0, CHECK); } // wait forever
  void notify_all(TRAPS)  { ObjectSynchronizer::notifyall(_obj, CHECK); }
  void waitUninterruptibly(TRAPS) { ObjectSynchronizer::waitUninterruptibly(_obj, 0, CHECK); }
  // complete_exit gives up lock completely, returning recursion count
  // reenter reclaims lock with original recursion count
  intptr_t complete_exit(TRAPS)  { return ObjectSynchronizer::complete_exit(_obj, THREAD); }
  void reenter(intptr_t recursion, TRAPS)  { ObjectSynchronizer::reenter(_obj, recursion, CHECK); }
};
 208 
 209 #endif // SHARE_VM_RUNTIME_SYNCHRONIZER_HPP