src/share/vm/memory/gcLocker.hpp

Print this page
rev 6069 : 8028498: runtime/SharedArchiveFile/CdsDifferentObjectAlignment.java asserts in RT_Baseline
Summary: Prevent GCs from occurring before the heap is completely initialized.
Reviewed-by:


  37 # include "os_solaris.inline.hpp"
  38 #endif
  39 #ifdef TARGET_OS_FAMILY_windows
  40 # include "os_windows.inline.hpp"
  41 #endif
  42 #ifdef TARGET_OS_FAMILY_bsd
  43 # include "os_bsd.inline.hpp"
  44 #endif
  45 
  46 // The direct lock/unlock calls do not force a collection if an unlock
  47 // decrements the count to zero. Avoid calling these if at all possible.
  48 
  49 class GC_locker: public AllStatic {
  50  private:
  51   // The _jni_lock_count keeps track of the number of threads that are
  52   // currently in a critical region.  It's only kept up to date when
  53   // _needs_gc is true.  The current value is computed during
  54   // safepointing and decremented during the slow path of GC_locker
  55   // unlocking.
  56   static volatile jint _jni_lock_count;  // number of jni active instances.
  57 
  58   static volatile jint _lock_count;      // number of other active instances
  59   static volatile bool _needs_gc;        // heap is filling, we need a GC
  60                                          // note: bool is typedef'd as jint
  61   static volatile bool _doing_gc;        // unlock_critical() is doing a GC
  62 
  63 #ifdef ASSERT
  64   // This lock count is updated for all operations and is used to
  65   // validate the jni_lock_count that is computed during safepoints.
  66   static volatile jint _debug_jni_lock_count;
  67 #endif
  68 
  69   // Accessors
  70   static bool is_jni_active() {
       // _jni_lock_count is only kept up to date while _needs_gc is set
       // (see the field comment above), hence the assert guarding this read.
  71     assert(_needs_gc, "only valid when _needs_gc is set");
  72     return _jni_lock_count > 0;
  73   }
  74 
  75   // At a safepoint, visit all threads and count the number of active
  76   // critical sections.  This is used to ensure that all active
  77   // critical sections are exited before a new one is started.
  78   static void verify_critical_count() NOT_DEBUG_RETURN;
  79 
  80   static void jni_lock(JavaThread* thread);
  81   static void jni_unlock(JavaThread* thread);
  82 
       // Locker is active if either the direct lock/unlock count or the JNI
       // critical-region count is non-zero. No safepoint assert here: callers
       // such as is_active_and_needs_gc() may run outside a safepoint.
  83   static bool is_active_internal() {
  84     verify_critical_count();
  85     return _lock_count > 0 || _jni_lock_count > 0;
  86   }
  87 
  88  public:
  89   // Accessors
       // Public query; the combined counts are only consistent at a
       // safepoint, so reading them anywhere else asserts.
  90   static bool is_active() {
  91     assert(SafepointSynchronize::is_at_safepoint(), "only read at safepoint");
  92     return is_active_internal();
  93   }
  94   static bool needs_gc()       { return _needs_gc;                        }
  95 
  96   // Shorthand
       // True when the locker is held AND a GC has been requested. Safe to
       // call off-safepoint, unlike is_active().
  97   static bool is_active_and_needs_gc() {
  98     // Use is_active_internal since _needs_gc can change from true to
  99     // false outside of a safepoint, triggering the assert in
 100     // is_active.
 101     return needs_gc() && is_active_internal();
 102   }
 103 
 104   // In debug mode track the locking state at all times
 105   static void increment_debug_jni_lock_count() {


 114     Atomic::dec(&_debug_jni_lock_count);
 115 #endif
 116   }
 117 
 118   // Set the current lock count
       // Install the JNI critical-region count computed during safepointing
       // (see the _jni_lock_count field comment); in debug builds it is
       // cross-checked against _debug_jni_lock_count via verify_critical_count().
 119   static void set_jni_lock_count(int count) {
 120     _jni_lock_count = count;
 121     verify_critical_count();
 122   }
 123 
 124   // Sets _needs_gc if is_active() is true. Returns is_active().
 125   static bool check_active_before_gc();
 126 
 127   // Stalls the caller (who should not be in a jni critical section)
 128   // until needs_gc() clears. Note however that needs_gc() may be
 129   // set at a subsequent safepoint and/or cleared under the
 130   // JNICritical_lock, so the caller may not safely assert upon
 131   // return from this method that "!needs_gc()" since that is
 132   // not a stable predicate.
 133   static void stall_until_clear();
 134 
 135   // Non-structured GC locking: currently needed for JNI. Use with care!
 136   static void lock();
 137   static void unlock();
 138 
 139   // The following two methods are used for JNI critical regions.
 140   // If we find that we failed to perform a GC because the GC_locker
 141   // was active, arrange for one as soon as possible by allowing
 142   // all threads in critical regions to complete, but not allowing
 143   // other critical regions to be entered. The reasons for that are:
 144   // 1) a GC request won't be starved by overlapping JNI critical
 145   //    region activities, which can cause unnecessary OutOfMemory errors.
 146   // 2) even if allocation requests can still be satisfied before GC locker
 147   //    becomes inactive, for example, in tenured generation possibly with
 148   //    heap expansion, those allocations can trigger lots of safepointing
 149   //    attempts (ineffective GC attempts) and require Heap_lock which
 150   //    slow down allocations tremendously.
 151   //
 152   // Note that critical regions can be nested in a single thread, so
 153   // we must allow threads already in critical regions to continue.
 154   //
 155   // JNI critical regions are the only participants in this scheme
 156   // because they are, by spec, well bounded while in a critical region.
 157   //




  37 # include "os_solaris.inline.hpp"
  38 #endif
  39 #ifdef TARGET_OS_FAMILY_windows
  40 # include "os_windows.inline.hpp"
  41 #endif
  42 #ifdef TARGET_OS_FAMILY_bsd
  43 # include "os_bsd.inline.hpp"
  44 #endif
  45 
  46 // The direct lock/unlock calls do not force a collection if an unlock
  47 // decrements the count to zero. Avoid calling these if at all possible.
  48 
  49 class GC_locker: public AllStatic {
  50  private:
  51   // The _jni_lock_count keeps track of the number of threads that are
  52   // currently in a critical region.  It's only kept up to date when
  53   // _needs_gc is true.  The current value is computed during
  54   // safepointing and decremented during the slow path of GC_locker
  55   // unlocking.
  56   static volatile jint _jni_lock_count;  // number of jni active instances.


  57   static volatile bool _needs_gc;        // heap is filling, we need a GC
  58                                          // note: bool is typedef'd as jint
  59   static volatile bool _doing_gc;        // unlock_critical() is doing a GC
  60 
  61 #ifdef ASSERT
  62   // This lock count is updated for all operations and is used to
  63   // validate the jni_lock_count that is computed during safepoints.
  64   static volatile jint _debug_jni_lock_count;
  65 #endif
  66 






  67   // At a safepoint, visit all threads and count the number of active
  68   // critical sections.  This is used to ensure that all active
  69   // critical sections are exited before a new one is started.
  70   static void verify_critical_count() NOT_DEBUG_RETURN;
  71 
  72   static void jni_lock(JavaThread* thread);
  73   static void jni_unlock(JavaThread* thread);
  74 
       // Locker is active iff some thread is inside a JNI critical region.
       // (Patched revision: the separate direct-lock count checked by the
       // pre-patch version is no longer consulted here.)
  75   static bool is_active_internal() {
  76     verify_critical_count();
  77     return _jni_lock_count > 0;
  78   }
  79 
  80  public:
  81   // Accessors
       // Public query; the count is only consistent at a safepoint, so
       // reading it anywhere else asserts.
  82   static bool is_active() {
  83     assert(SafepointSynchronize::is_at_safepoint(), "only read at safepoint");
  84     return is_active_internal();
  85   }
  86   static bool needs_gc()       { return _needs_gc;                        }
  87 
  88   // Shorthand
       // True when the locker is held AND a GC has been requested. Safe to
       // call off-safepoint, unlike is_active().
  89   static bool is_active_and_needs_gc() {
  90     // Use is_active_internal since _needs_gc can change from true to
  91     // false outside of a safepoint, triggering the assert in
  92     // is_active.
  93     return needs_gc() && is_active_internal();
  94   }
  95 
  96   // In debug mode track the locking state at all times
  97   static void increment_debug_jni_lock_count() {


 106     Atomic::dec(&_debug_jni_lock_count);
 107 #endif
 108   }
 109 
 110   // Set the current lock count
       // Install the JNI critical-region count computed during safepointing
       // (see the _jni_lock_count field comment); in debug builds it is
       // cross-checked against _debug_jni_lock_count via verify_critical_count().
 111   static void set_jni_lock_count(int count) {
 112     _jni_lock_count = count;
 113     verify_critical_count();
 114   }
 115 
 116   // Sets _needs_gc if is_active() is true. Returns is_active().
 117   static bool check_active_before_gc();
 118 
 119   // Stalls the caller (who should not be in a jni critical section)
 120   // until needs_gc() clears. Note however that needs_gc() may be
 121   // set at a subsequent safepoint and/or cleared under the
 122   // JNICritical_lock, so the caller may not safely assert upon
 123   // return from this method that "!needs_gc()" since that is
 124   // not a stable predicate.
 125   static void stall_until_clear();




 126 
 127   // The following two methods are used for JNI critical regions.
 128   // If we find that we failed to perform a GC because the GC_locker
 129   // was active, arrange for one as soon as possible by allowing
 130   // all threads in critical regions to complete, but not allowing
 131   // other critical regions to be entered. The reasons for that are:
 132   // 1) a GC request won't be starved by overlapping JNI critical
 133   //    region activities, which can cause unnecessary OutOfMemory errors.
 134   // 2) even if allocation requests can still be satisfied before GC locker
 135   //    becomes inactive, for example, in tenured generation possibly with
 136   //    heap expansion, those allocations can trigger lots of safepointing
 137   //    attempts (ineffective GC attempts) and require Heap_lock which
 138   //    slow down allocations tremendously.
 139   //
 140   // Note that critical regions can be nested in a single thread, so
 141   // we must allow threads already in critical regions to continue.
 142   //
 143   // JNI critical regions are the only participants in this scheme
 144   // because they are, by spec, well bounded while in a critical region.
 145   //