
src/share/vm/memory/sharedHeap.hpp

rev 7973 : G1RootProcessor
rev 7974 : Convert G1 to G1RootProcessor
rev 7975 : Move remaining root processing to GenCollectedHeap


  87 //  and then to set it to 0 after that task has completed.  A value of
  88 //  0 is a "special" value in set_n_threads() which translates to
  89 //  setting _n_threads to 1.
  90 //
  91 //  Some code uses _n_termination to decide if work should be done in
  92 //  parallel.  The notorious possibly_parallel_oops_do() in threads.cpp
  93 //  is an example of such code.  Look for variable "is_par" for other
  94 //  examples.
  95 //
  96 //  The active_workers is not reset to 0 after a parallel phase.  Its
  97 //  value may be used in later phases and in one instance at least
  98 //  (the parallel remark) it has to be used (the parallel remark depends
  99 //  on the partitioning done in the previous parallel scavenge).
 100 
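A minimal sketch of the "is_par" pattern referred to above, loosely modeled on possibly_parallel_oops_do(); the Threads iteration and claim_oops_do() call are assumptions about the contemporaneous runtime/thread.hpp interface, not declarations in this header:

    // Sketch: walk Java thread roots, claiming each thread by parity when the
    // phase is parallel.  claim_oops_do() is assumed to return true
    // immediately in the serial case.
    void possibly_parallel_thread_roots_sketch(OopClosure* f,
                                               CLDClosure* cld_f,
                                               CodeBlobClosure* cf) {
      SharedHeap* sh = SharedHeap::heap();
      bool is_par = sh->n_par_threads() > 0;        // parallel only if workers were set up
      int  cp     = sh->strong_roots_parity();
      for (JavaThread* t = Threads::first(); t != NULL; t = t->next()) {
        if (t->claim_oops_do(is_par, cp)) {         // CAS-claim; only one worker wins
          t->oops_do(f, cld_f, cf);
        }
      }
    }
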
 101 class SharedHeap : public CollectedHeap {
 102   friend class VMStructs;
 103 
 104   friend class VM_GC_Operation;
 105   friend class VM_CGC_Operation;
 106 
 107 private:
 108   // For claiming strong_roots tasks.
 109   SubTasksDone* _process_strong_tasks;
 110 
 111 protected:
 112   // There should be only a single instance of "SharedHeap" in a program.
 113   // This is enforced with the protected constructor below, which will also
 114   // set the static pointer "_sh" to that instance.
 115   static SharedHeap* _sh;
 116 
  117   // A gc policy that controls global gc resource issues
 118   CollectorPolicy *_collector_policy;
 119 
 120   // See the discussion below, in the specification of the reader function
 121   // for this variable.
 122   int _strong_roots_parity;
 123 
 124   // If we're doing parallel GC, use this gang of threads.
 125   FlexibleWorkGang* _workers;
 126 
 127   // Full initialization is done in a concrete subtype's "initialize"
 128   // function.
 129   SharedHeap(CollectorPolicy* policy_);
 130 
 131   // Returns true if the calling thread holds the heap lock,
 132   // or the calling thread is a par gc thread and the heap_lock is held
 133   // by the vm thread doing a gc operation.
 134   bool heap_lock_held_for_gc();
  135   // True if the heap_lock is held by a non-gc thread invoking a gc
 136   // operation.
 137   bool _thread_holds_heap_lock_for_gc;
 138 
 139 public:
 140   static SharedHeap* heap() { return _sh; }
 141 
 142   void set_barrier_set(BarrierSet* bs);
 143   SubTasksDone* process_strong_tasks() { return _process_strong_tasks; }
 144 
 145   // Does operations required after initialization has been done.
 146   virtual void post_initialize();
 147 
 148   // Initialization of ("weak") reference processing support
 149   virtual void ref_processing_init();
 150 
 151   // Iteration functions.
 152   void oop_iterate(ExtendedOopClosure* cl) = 0;
 153 
 154   // Iterate over all spaces in use in the heap, in an undefined order.
 155   virtual void space_iterate(SpaceClosure* cl) = 0;
 156 
 157   // A SharedHeap will contain some number of spaces.  This finds the
 158   // space whose reserved area contains the given address, or else returns
 159   // NULL.
 160   virtual Space* space_containing(const void* addr) const = 0;
 161 
 162   bool no_gc_in_progress() { return !is_gc_active(); }
 163 


 176   // The idea is that objects representing fine-grained tasks, such as
  177   // threads, will contain a "parity" field.  A task will be claimed in the
 178   // current "process_roots" call only if its parity field is the
 179   // same as the "strong_roots_parity"; task claiming is accomplished by
 180   // updating the parity field to the strong_roots_parity with a CAS.
 181   //
  182   // If the client meets this spec, then strong_roots_parity() will have
 183   // the following properties:
 184   //   a) to return a different value than was returned before the last
 185   //      call to change_strong_roots_parity, and
  186   //   b) to never return a distinguished value (zero) with which such
 187   //      task-claiming variables may be initialized, to indicate "never
 188   //      claimed".
 189  public:
 190   int strong_roots_parity() { return _strong_roots_parity; }
 191 
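A rough reconstruction of the claiming CAS described above; the free-standing function and the task_parity pointer are illustrative only (in HotSpot the parity field lives on the task object, e.g. a Thread), and Atomic::cmpxchg follows the old (exchange_value, dest, compare_value) argument order:

    // Sketch: the claim succeeds for exactly one caller per root-processing pass.
    bool claim_by_parity_sketch(volatile jint* task_parity, jint strong_roots_parity) {
      jint prev = *task_parity;
      if (prev == strong_roots_parity) {
        return false;                               // already claimed in this pass
      }
      // Flip the task to the current parity; the CAS guarantees a single winner.
      return Atomic::cmpxchg(strong_roots_parity, task_parity, prev) == prev;
    }
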
 192   // Call these in sequential code around process_roots.
 193   // strong_roots_prologue calls change_strong_roots_parity, if
 194   // parallel tasks are enabled.
 195   class StrongRootsScope : public MarkingCodeBlobClosure::MarkScope {
 196     // Used to implement the Thread work barrier.
 197     static Monitor* _lock;
 198 
 199     SharedHeap*   _sh;
 200     volatile jint _n_workers_done_with_threads;
 201 
 202    public:
 203     StrongRootsScope(SharedHeap* heap, bool activate = true);
 204     ~StrongRootsScope();
 205 
 206     // Mark that this thread is done with the Threads work.
 207     void mark_worker_done_with_threads(uint n_workers);
 208     // Wait until all n_workers are done with the Threads work.
 209     void wait_until_all_workers_done_with_threads(uint n_workers);
 210   };
 211   friend class StrongRootsScope;
 212 
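A hedged sketch of how a parallel worker might use the barrier above; the surrounding function and its parameters are illustrative, and only the two scope calls come from the declarations:

    // Sketch: signal completion of this worker's Threads work, and (optionally)
    // wait for every other worker before a phase that needs all thread stacks
    // to have been scanned.
    void worker_thread_roots_sketch(SharedHeap* sh, uint n_workers,
                                    OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf) {
      Threads::possibly_parallel_oops_do(f, cld_f, cf);  // this worker's share of thread scanning
      SharedHeap::StrongRootsScope* scope = sh->active_strong_roots_scope();
      scope->mark_worker_done_with_threads(n_workers);
      // scope->wait_until_all_workers_done_with_threads(n_workers);
    }
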
  213   // The currently active StrongRootsScope
 214   StrongRootsScope* _strong_roots_scope;
 215 
 216   StrongRootsScope* active_strong_roots_scope() const;
 217 
 218  private:
 219   void register_strong_roots_scope(StrongRootsScope* scope);
 220   void unregister_strong_roots_scope(StrongRootsScope* scope);
 221   void change_strong_roots_parity();
 222 
 223  public:
 224   enum ScanningOption {
 225     SO_None                =  0x0,
 226     SO_AllCodeCache        =  0x8,
 227     SO_ScavengeCodeCache   = 0x10
 228   };
 229 
 230   FlexibleWorkGang* workers() const { return _workers; }
 231 
  232   // Invoke the "do_oop" method of the closure "roots" on all root locations.
 233   // The "so" argument determines which roots the closure is applied to:
 234   // "SO_None" does none;
 235   // "SO_AllCodeCache" applies the closure to all elements of the CodeCache.
 236   // "SO_ScavengeCodeCache" applies the closure to elements on the scavenge root list in the CodeCache.
 237   void process_roots(bool activate_scope,
 238                      ScanningOption so,
 239                      OopClosure* strong_roots,
 240                      OopClosure* weak_roots,
 241                      CLDClosure* strong_cld_closure,
 242                      CLDClosure* weak_cld_closure,
 243                      CodeBlobClosure* code_roots);
 244   void process_all_roots(bool activate_scope,
 245                          ScanningOption so,
 246                          OopClosure* roots,
 247                          CLDClosure* cld_closure,
 248                          CodeBlobClosure* code_roots);
 249   void process_strong_roots(bool activate_scope,
 250                             ScanningOption so,
 251                             OopClosure* roots,
 252                             CLDClosure* cld_closure,
 253                             CodeBlobClosure* code_roots);
 254 
 255 
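A hedged usage sketch for the root-processing entry points above, as a subclass collector might call them; the closure arguments are placeholders, and ScanningOption values can be combined with the operator| defined at the bottom of this header:

    // Sketch: scan strong roots plus the scavengable part of the code cache.
    void strong_root_scan_sketch(SharedHeap* sh,
                                 OopClosure* strong_oops,
                                 CLDClosure* strong_clds,
                                 CodeBlobClosure* strong_code) {
      SharedHeap::ScanningOption so = SharedHeap::SO_ScavengeCodeCache;
      sh->process_strong_roots(true /* activate_scope */,
                               so,
                               strong_oops,
                               strong_clds,
                               strong_code);
    }
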
  256   // Apply "root_closure" to the JNI weak roots.
 257   void process_weak_roots(OopClosure* root_closure);
 258 
 259   // The functions below are helper functions that a subclass of
 260   // "SharedHeap" can use in the implementation of its virtual
 261   // functions.
 262 
 263 public:
 264 
 265   // Do anything common to GC's.
 266   virtual void gc_prologue(bool full) = 0;
 267   virtual void gc_epilogue(bool full) = 0;
 268 
 269   // Sets the number of parallel threads that will be doing tasks
 270   // (such as process roots) subsequently.
 271   virtual void set_par_threads(uint t);
 272 
 273   int n_termination();
 274   void set_n_termination(int t);
 275 
 276   //
 277   // New methods from CollectedHeap
 278   //
 279 
 280   // Some utilities.
 281   void print_size_transition(outputStream* out,
 282                              size_t bytes_before,
 283                              size_t bytes_after,
 284                              size_t capacity);
 285 };
 286 
 287 inline SharedHeap::ScanningOption operator|(SharedHeap::ScanningOption so0, SharedHeap::ScanningOption so1) {
 288   return static_cast<SharedHeap::ScanningOption>(static_cast<int>(so0) | static_cast<int>(so1));
 289 }
 290 
 291 #endif // SHARE_VM_MEMORY_SHAREDHEAP_HPP


  87 //  and then to set it to 0 after that task has completed.  A value of
  88 //  0 is a "special" value in set_n_threads() which translates to
  89 //  setting _n_threads to 1.
  90 //
  91 //  Some code uses _n_termination to decide if work should be done in
  92 //  parallel.  The notorious possibly_parallel_oops_do() in threads.cpp
  93 //  is an example of such code.  Look for variable "is_par" for other
  94 //  examples.
  95 //
  96 //  The active_workers is not reset to 0 after a parallel phase.  Its
  97 //  value may be used in later phases and in one instance at least
  98 //  (the parallel remark) it has to be used (the parallel remark depends
  99 //  on the partitioning done in the previous parallel scavenge).
 100 
 101 class SharedHeap : public CollectedHeap {
 102   friend class VMStructs;
 103 
 104   friend class VM_GC_Operation;
 105   friend class VM_CGC_Operation;
 106 




 107 protected:
 108   // There should be only a single instance of "SharedHeap" in a program.
 109   // This is enforced with the protected constructor below, which will also
 110   // set the static pointer "_sh" to that instance.
 111   static SharedHeap* _sh;
 112 
  113   // A gc policy that controls global gc resource issues
 114   CollectorPolicy *_collector_policy;
 115 
 116   // See the discussion below, in the specification of the reader function
 117   // for this variable.
 118   int _strong_roots_parity;
 119 
 120   // If we're doing parallel GC, use this gang of threads.
 121   FlexibleWorkGang* _workers;
 122 
 123   // Full initialization is done in a concrete subtype's "initialize"
 124   // function.
 125   SharedHeap(CollectorPolicy* policy_);
 126 
 127   // Returns true if the calling thread holds the heap lock,
 128   // or the calling thread is a par gc thread and the heap_lock is held
 129   // by the vm thread doing a gc operation.
 130   bool heap_lock_held_for_gc();
  131   // True if the heap_lock is held by a non-gc thread invoking a gc
 132   // operation.
 133   bool _thread_holds_heap_lock_for_gc;
 134 
 135 public:
 136   static SharedHeap* heap() { return _sh; }
 137 
 138   void set_barrier_set(BarrierSet* bs);

 139 
 140   // Does operations required after initialization has been done.
 141   virtual void post_initialize();
 142 
 143   // Initialization of ("weak") reference processing support
 144   virtual void ref_processing_init();
 145 
 146   // Iteration functions.
 147   void oop_iterate(ExtendedOopClosure* cl) = 0;
 148 
 149   // Iterate over all spaces in use in the heap, in an undefined order.
 150   virtual void space_iterate(SpaceClosure* cl) = 0;
 151 
 152   // A SharedHeap will contain some number of spaces.  This finds the
 153   // space whose reserved area contains the given address, or else returns
 154   // NULL.
 155   virtual Space* space_containing(const void* addr) const = 0;
 156 
 157   bool no_gc_in_progress() { return !is_gc_active(); }
 158 


 171   // The idea is that objects representing fine-grained tasks, such as
  172   // threads, will contain a "parity" field.  A task will be claimed in the
 173   // current "process_roots" call only if its parity field is the
 174   // same as the "strong_roots_parity"; task claiming is accomplished by
 175   // updating the parity field to the strong_roots_parity with a CAS.
 176   //
  177   // If the client meets this spec, then strong_roots_parity() will have
 178   // the following properties:
 179   //   a) to return a different value than was returned before the last
 180   //      call to change_strong_roots_parity, and
  181   //   b) to never return a distinguished value (zero) with which such
 182   //      task-claiming variables may be initialized, to indicate "never
 183   //      claimed".
 184  public:
 185   int strong_roots_parity() { return _strong_roots_parity; }
 186 
 187   // Call these in sequential code around process_roots.
 188   // strong_roots_prologue calls change_strong_roots_parity, if
 189   // parallel tasks are enabled.
 190   class StrongRootsScope : public MarkingCodeBlobClosure::MarkScope {



 191     SharedHeap*   _sh;

 192 
 193    public:
 194     StrongRootsScope(SharedHeap* heap, bool activate = true);






 195   };
 196   friend class StrongRootsScope;
 197 





 198  private:


 199   void change_strong_roots_parity();
 200 
 201  public:






 202   FlexibleWorkGang* workers() const { return _workers; }
 203 



























 204   // The functions below are helper functions that a subclass of
 205   // "SharedHeap" can use in the implementation of its virtual
 206   // functions.
 207 
 208 public:
 209 
 210   // Do anything common to GC's.
 211   virtual void gc_prologue(bool full) = 0;
 212   virtual void gc_epilogue(bool full) = 0;
 213 
 214   // Sets the number of parallel threads that will be doing tasks
 215   // (such as process roots) subsequently.
 216   virtual void set_par_threads(uint t);
 217 



 218   //
 219   // New methods from CollectedHeap
 220   //
 221 
 222   // Some utilities.
 223   void print_size_transition(outputStream* out,
 224                              size_t bytes_before,
 225                              size_t bytes_after,
 226                              size_t capacity);
 227 };




 228 
 229 #endif // SHARE_VM_MEMORY_SHAREDHEAP_HPP