
src/share/vm/memory/sharedHeap.hpp

  44 class SubTasksDone;
  45 class WorkGang;
  46 class FlexibleWorkGang;
  47 class CollectorPolicy;
  48 class KlassClosure;
  49 
  50 // Note on use of FlexibleWorkGang's for GC.
  51 // There are three places where task completion is determined.
  52 // These are:
  53 //    1) ParallelTaskTerminator::offer_termination(), where _n_threads
  54 //    must be set to the correct value so that the count of workers that
  55 //    have offered termination will exactly match the number
  56 //    working on the task.  Tasks such as those derived from GCTask
  57 //    use ParallelTaskTerminator's.  Tasks that want load balancing
  58 //    by work stealing use this method to gauge completion.
  59 //    2) SubTasksDone has a variable _n_threads that is used in
  60 //    all_tasks_completed() to determine completion.  all_tasks_completed()
  61 //    counts the number of workers that have finished and then resets
  62 //    the SubTasksDone so that it can be used again.  When the number of
  63 //    tasks is set to the number of GC workers, then _n_threads must
  64 //    be set to the number of active GC workers.  G1CollectedHeap,
  65 //    HRInto_G1RemSet, GenCollectedHeap and SharedHeap have SubTasksDone.
  66 //    This seems like too many.
  67 //    3) SequentialSubTasksDone has an _n_threads that is used in
  68 //    a way similar to SubTasksDone and has the same dependency on the
  69 //    number of active GC workers.  CompactibleFreeListSpace and Space
  70 //    have SequentialSubTasksDone's.
  71 // Example of using SubTasksDone and SequentialSubTasksDone:
  72 // G1CollectedHeap::g1_process_roots() calls down
  73 //  to SharedHeap::process_roots() and uses
  74 //  SubTasksDone* _process_strong_tasks to claim tasks.
  75 //  process_roots() calls
  76 //      rem_set()->younger_refs_iterate()
  77 //  to scan the card table, which eventually calls down into
  78 //  CardTableModRefBS::par_non_clean_card_iterate_work().  This method
  79 //  uses SequentialSubTasksDone* _pst to claim tasks.
  80 //  Both SubTasksDone and SequentialSubTasksDone call their method
  81 //  all_tasks_completed() to count the number of GC workers that have
  82 //  finished their work.  That logic is "when all the workers are
  83 //  finished the tasks are finished".
  84 //
  85 //  The pattern that appears  in the code is to set _n_threads
  86 //  to a value > 1 before a task that you would like executed in parallel
  87 //  and then to set it to 0 after that task has completed.  A value of
  88 //  0 is a "special" value in set_n_threads() which translates to
  89 //  setting _n_threads to 1.
  90 //
  91 //  Some code uses _n_termination to decide if work should be done in
  92 //  parallel.  The notorious possibly_parallel_oops_do() in thread.cpp
  93 //  is an example of such code.  Look for variable "is_par" for other
  94 //  examples.
  95 //
  96 //  The active_workers is not reset to 0 after a parallel phase.  Its
  97 //  value may be used in later phases, and in at least one instance
  98 //  (the parallel remark) it has to be used (the parallel remark depends
  99 //  on the partitioning done in the previous parallel scavenge).
 100 
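The claiming pattern described in the note above can be made concrete with a short, hedged sketch. The SubTasksDone calls below (is_task_claimed(), all_tasks_completed(), set_n_threads()) are the ones the note refers to; the subtask ids and the worker function are hypothetical names used only for illustration, assuming universe.hpp, jniHandles.hpp and workgroup.hpp are included.

// Hedged sketch only: MyRootTask and my_scan_roots() are hypothetical.
enum MyRootTask {
  MyTask_Universe_oops_do,
  MyTask_JNIHandles_oops_do,
  MyTask_Num
};

void my_scan_roots(SubTasksDone* pst, OopClosure* roots) {
  // Run by every GC worker; each subtask is executed by whichever
  // worker claims it first.
  if (!pst->is_task_claimed(MyTask_Universe_oops_do)) {
    Universe::oops_do(roots);
  }
  if (!pst->is_task_claimed(MyTask_JNIHandles_oops_do)) {
    JNIHandles::oops_do(roots);
  }
  // Every worker reports in here; once _n_threads workers have done so,
  // the SubTasksDone resets itself for the next parallel phase.
  pst->all_tasks_completed();
}

Per the set_n_threads() pattern described above, the driver would call something like pst->set_n_threads(workers()->active_workers()) before the parallel phase and pst->set_n_threads(0) afterwards, 0 being the special value that maps back to the sequential count of 1.
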
 101 class SharedHeap : public CollectedHeap {
 102   friend class VMStructs;
 103 
 104   friend class VM_GC_Operation;
 105   friend class VM_CGC_Operation;
 106 
 107 private:
 108   // For claiming strong_roots tasks.
 109   SubTasksDone* _process_strong_tasks;
 110 
 111 protected:
 112   // There should be only a single instance of "SharedHeap" in a program.
 113   // This is enforced with the protected constructor below, which will also
 114   // set the static pointer "_sh" to that instance.
 115   static SharedHeap* _sh;
 116 
 117   // The Gen Remembered Set, at least one good enough to scan the perm
 118   // gen.
 119   GenRemSet* _rem_set;
 120 
 121   // A gc policy, controls global gc resource issues
 122   CollectorPolicy *_collector_policy;
 123 
 124   // See the discussion below, in the specification of the reader function
 125   // for this variable.
 126   int _strong_roots_parity;
 127 
 128   // If we're doing parallel GC, use this gang of threads.
 129   FlexibleWorkGang* _workers;
 130 
 131   // Full initialization is done in a concrete subtype's "initialize"
 132   // function.
 133   SharedHeap(CollectorPolicy* policy_);
 134 
 135   // Returns true if the calling thread holds the heap lock,
 136   // or the calling thread is a par gc thread and the heap_lock is held
 137   // by the vm thread doing a gc operation.
 138   bool heap_lock_held_for_gc();
 139   // True if the heap_lock is held by a non-gc thread invoking a gc
 140   // operation.
 141   bool _thread_holds_heap_lock_for_gc;
 142 
 143 public:
 144   static SharedHeap* heap() { return _sh; }
 145 
 146   void set_barrier_set(BarrierSet* bs);
 147   SubTasksDone* process_strong_tasks() { return _process_strong_tasks; }
 148 
 149   // Does operations required after initialization has been done.
 150   virtual void post_initialize();
 151 
 152   // Initialization of ("weak") reference processing support
 153   virtual void ref_processing_init();
 154 
 155   // This function returns the "GenRemSet" object that allows us to scan
 156   // generations in a fully generational heap.
 157   GenRemSet* rem_set() { return _rem_set; }
 158 
 159   // Iteration functions.
 160   void oop_iterate(ExtendedOopClosure* cl) = 0;
 161 
 162   // Iterate over all spaces in use in the heap, in an undefined order.
 163   virtual void space_iterate(SpaceClosure* cl) = 0;
 164 
 165   // A SharedHeap will contain some number of spaces.  This finds the
 166   // space whose reserved area contains the given address, or else returns
 167   // NULL.


 184   // The idea is that objects representing fine-grained tasks, such as
 185   // threads, will contain a "parity" field.  A task is claimed in the
 186   // current "process_roots" call only if its parity field is the
 187   // same as the "strong_roots_parity"; task claiming is accomplished by
 188   // updating the parity field to the strong_roots_parity with a CAS.
 189   //
 190   // If the client meets this spec, then strong_roots_parity() will have
 191   // the following properties:
 192   //   a) to return a different value than was returned before the last
 193   //      call to change_strong_roots_parity, and
 194   //   b) to never return a distinguished value (zero) with which such
 195   //      task-claiming variables may be initialized, to indicate "never
 196   //      claimed".
 197  public:
 198   int strong_roots_parity() { return _strong_roots_parity; }
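       // A hedged sketch of the claiming protocol specified above.  The
       // _claim_parity field and my_claim_for_roots() helper are hypothetical
       // (Thread keeps its parity in _oops_do_parity); only the CAS shape
       // matters, assuming a volatile jint _claim_parity member:
       //
       //   bool my_claim_for_roots(int strong_roots_parity) {
       //     jint prev = _claim_parity;          // 0 means "never claimed"
       //     if (prev != strong_roots_parity) {
       //       // The worker whose CAS installs the new parity owns the task.
       //       return Atomic::cmpxchg(strong_roots_parity, &_claim_parity, prev) == prev;
       //     }
       //     return false;  // already claimed during this process_roots() pass
       //   }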
 199 
 200   // Call these in sequential code around process_roots.
 201   // strong_roots_prologue calls change_strong_roots_parity, if
 202   // parallel tasks are enabled.
 203   class StrongRootsScope : public MarkingCodeBlobClosure::MarkScope {
 204     // Used to implement the Thread work barrier.
 205     static Monitor* _lock;
 206 
 207     SharedHeap*   _sh;
 208     volatile jint _n_workers_done_with_threads;
 209 
 210    public:
 211     StrongRootsScope(SharedHeap* heap, bool activate = true);
 212     ~StrongRootsScope();
 213 
 214     // Mark that this thread is done with the Threads work.
 215     void mark_worker_done_with_threads(uint n_workers);
 216     // Wait until all n_workers are done with the Threads work.
 217     void wait_until_all_workers_done_with_threads(uint n_workers);
 218   };
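       // A hedged usage sketch (the surrounding driver code is hypothetical):
       // sequential code brackets parallel root processing with the scope, e.g.
       //
       //   {
       //     SharedHeap::StrongRootsScope srs(SharedHeap::heap(), true /* activate */);
       //     // ... GC workers run process_roots(...) / process_strong_roots(...) ...
       //   }  // leaving the scope ends the strong-roots phase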
 219   friend class StrongRootsScope;
 220 
 221   // The currently active StrongRootsScope
 222   StrongRootsScope* _strong_roots_scope;
 223 
 224   StrongRootsScope* active_strong_roots_scope() const;
 225 
 226  private:
 227   void register_strong_roots_scope(StrongRootsScope* scope);
 228   void unregister_strong_roots_scope(StrongRootsScope* scope);
 229   void change_strong_roots_parity();
 230 
 231  public:
 232   enum ScanningOption {
 233     SO_None                =  0x0,
 234     SO_AllCodeCache        =  0x8,
 235     SO_ScavengeCodeCache   = 0x10
 236   };
 237 
 238   FlexibleWorkGang* workers() const { return _workers; }
 239 
 240   // Invoke the "do_oop" method of the closure "roots" on all root locations.
 241   // The "so" argument determines which roots the closure is applied to:
 242   // "SO_None" does none;
 243   // "SO_AllCodeCache" applies the closure to all elements of the CodeCache.
 244   // "SO_ScavengeCodeCache" applies the closure to elements on the scavenge root list in the CodeCache.
 245   void process_roots(bool activate_scope,
 246                      ScanningOption so,
 247                      OopClosure* strong_roots,
 248                      OopClosure* weak_roots,
 249                      CLDClosure* strong_cld_closure,
 250                      CLDClosure* weak_cld_closure,
 251                      CodeBlobClosure* code_roots);
 252   void process_all_roots(bool activate_scope,
 253                          ScanningOption so,
 254                          OopClosure* roots,
 255                          CLDClosure* cld_closure,
 256                          CodeBlobClosure* code_roots);
 257   void process_strong_roots(bool activate_scope,
 258                             ScanningOption so,
 259                             OopClosure* roots,
 260                             CLDClosure* cld_closure,
 261                             CodeBlobClosure* code_roots);
 262 
 263 
 264   // Apply "root_closure" to the JNI weak roots.
 265   void process_weak_roots(OopClosure* root_closure);
 266 
 267   // The functions below are helper functions that a subclass of
 268   // "SharedHeap" can use in the implementation of its virtual
 269   // functions.
 270 
 271 public:
 272 
 273   // Do anything common to GC's.
 274   virtual void gc_prologue(bool full) = 0;
 275   virtual void gc_epilogue(bool full) = 0;
 276 
 277   // Sets the number of parallel threads that will be doing tasks
 278   // (such as process roots) subsequently.
 279   virtual void set_par_threads(uint t);
 280 
 281   int n_termination();
 282   void set_n_termination(int t);
 283 
 284   //
 285   // New methods from CollectedHeap
 286   //
 287 
 288   // Some utilities.
 289   void print_size_transition(outputStream* out,
 290                              size_t bytes_before,
 291                              size_t bytes_after,
 292                              size_t capacity);
 293 };
 294 
 295 inline SharedHeap::ScanningOption operator|(SharedHeap::ScanningOption so0, SharedHeap::ScanningOption so1) {
 296   return static_cast<SharedHeap::ScanningOption>(static_cast<int>(so0) | static_cast<int>(so1));
 297 }
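The inline operator above lets ScanningOption values be combined with | and passed as the "so" argument of process_roots() / process_all_roots() / process_strong_roots() without a cast. A minimal, hedged usage sketch:

// Hedged sketch: build a ScanningOption mask for a pass that also walks
// the scavengable nmethods in the CodeCache.
SharedHeap::ScanningOption so =
    SharedHeap::SO_None | SharedHeap::SO_ScavengeCodeCache;
// 'so' is then handed to process_roots(...) (or one of its wrappers) as
// the ScanningOption argument.
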
 298 
 299 #endif // SHARE_VM_MEMORY_SHAREDHEAP_HPP


  44 class SubTasksDone;
  45 class WorkGang;
  46 class FlexibleWorkGang;
  47 class CollectorPolicy;
  48 class KlassClosure;
  49 
  50 // Note on use of FlexibleWorkGang's for GC.
  51 // There are three places where task completion is determined.
  52 // These are:
  53 //    1) ParallelTaskTerminator::offer_termination(), where _n_threads
  54 //    must be set to the correct value so that the count of workers that
  55 //    have offered termination will exactly match the number
  56 //    working on the task.  Tasks such as those derived from GCTask
  57 //    use ParallelTaskTerminator's.  Tasks that want load balancing
  58 //    by work stealing use this method to gauge completion.
  59 //    2) SubTasksDone has a variable _n_threads that is used in
  60 //    all_tasks_completed() to determine completion.  all_tasks_completed()
  61 //    counts the number of workers that have finished and then resets
  62 //    the SubTasksDone so that it can be used again.  When the number of
  63 //    tasks is set to the number of GC workers, then _n_threads must
  64 //    be set to the number of active GC workers. G1RootProcessor and
  65 //    GenCollectedHeap have SubTasksDone.

  66 //    3) SequentialSubTasksDone has an _n_threads that is used in
  67 //    a way similar to SubTasksDone and has the same dependency on the
  68 //    number of active GC workers.  CompactibleFreeListSpace and Space
  69 //    have SequentialSubTasksDone's.
  70 //
  71 // Examples of using SubTasksDone and SequentialSubTasksDone:
  72 //  G1RootProcessor and GenCollectedHeap::process_roots() use
  73 //  SubTasksDone* _process_strong_tasks to claim tasks for workers
  74 //
  75 //  GenCollectedHeap::gen_process_roots() calls
  76 //      rem_set()->younger_refs_iterate()
  77 //  to scan the card table, which eventually calls down into
  78 //  CardTableModRefBS::par_non_clean_card_iterate_work().  This method
  79 //  uses SequentialSubTasksDone* _pst to claim tasks.
  80 //  Both SubTasksDone and SequentialSubTasksDone call their method
  81 //  all_tasks_completed() to count the number of GC workers that have
  82 //  finished their work.  That logic is "when all the workers are
  83 //  finished the tasks are finished".
  84 //
  85 //  The pattern that appears  in the code is to set _n_threads
  86 //  to a value > 1 before a task that you would like executed in parallel
  87 //  and then to set it to 0 after that task has completed.  A value of
  88 //  0 is a "special" value in set_n_threads() which translates to
  89 //  setting _n_threads to 1.
  90 //
  91 //  Some code uses _n_termination to decide if work should be done in
  92 //  parallel.  The notorious possibly_parallel_oops_do() in thread.cpp
  93 //  is an example of such code.  Look for variable "is_par" for other
  94 //  examples.
  95 //
  96 //  The active_workers is not reset to 0 after a parallel phase.  Its
  97 //  value may be used in later phases, and in at least one instance
  98 //  (the parallel remark) it has to be used (the parallel remark depends
  99 //  on the partitioning done in the previous parallel scavenge).
 100 
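As a counterpart to the SubTasksDone pattern, here is a hedged sketch of the SequentialSubTasksDone chunk-claiming loop mentioned in 3) above; my_scan_chunks() and the chunk-processing comment are hypothetical, while is_task_claimed(uint&) and all_tasks_completed() are the calls described in the note.

// Hedged sketch only: my_scan_chunks() is a hypothetical worker body.
void my_scan_chunks(SequentialSubTasksDone* pst) {
  uint nth_chunk = 0;
  // is_task_claimed() hands out chunk indices 0, 1, 2, ... by CAS and
  // returns true only once all n_tasks chunks have been claimed.
  while (!pst->is_task_claimed(/* reference */ nth_chunk)) {
    // ... scan the nth_chunk'th stride of the card table here ...
  }
  // Completion is again "every worker has reported in".
  pst->all_tasks_completed();
}
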
 101 class SharedHeap : public CollectedHeap {
 102   friend class VMStructs;
 103 
 104   friend class VM_GC_Operation;
 105   friend class VM_CGC_Operation;
 106 




 107 protected:
 108   // There should be only a single instance of "SharedHeap" in a program.
 109   // This is enforced with the protected constructor below, which will also
 110   // set the static pointer "_sh" to that instance.
 111   static SharedHeap* _sh;
 112 
 113   // The Gen Remembered Set, at least one good enough to scan the perm
 114   // gen.
 115   GenRemSet* _rem_set;
 116 
 117   // A gc policy, controls global gc resource issues
 118   CollectorPolicy *_collector_policy;
 119 
 120   // See the discussion below, in the specification of the reader function
 121   // for this variable.
 122   int _strong_roots_parity;
 123 
 124   // If we're doing parallel GC, use this gang of threads.
 125   FlexibleWorkGang* _workers;
 126 
 127   // Full initialization is done in a concrete subtype's "initialize"
 128   // function.
 129   SharedHeap(CollectorPolicy* policy_);
 130 
 131   // Returns true if the calling thread holds the heap lock,
 132   // or the calling thread is a par gc thread and the heap_lock is held
 133   // by the vm thread doing a gc operation.
 134   bool heap_lock_held_for_gc();
 135   // True if the heap_lock is held by a non-gc thread invoking a gc
 136   // operation.
 137   bool _thread_holds_heap_lock_for_gc;
 138 
 139 public:
 140   static SharedHeap* heap() { return _sh; }
 141 
 142   void set_barrier_set(BarrierSet* bs);

 143 
 144   // Does operations required after initialization has been done.
 145   virtual void post_initialize();
 146 
 147   // Initialization of ("weak") reference processing support
 148   virtual void ref_processing_init();
 149 
 150   // This function returns the "GenRemSet" object that allows us to scan
 151   // generations in a fully generational heap.
 152   GenRemSet* rem_set() { return _rem_set; }
 153 
 154   // Iteration functions.
 155   void oop_iterate(ExtendedOopClosure* cl) = 0;
 156 
 157   // Iterate over all spaces in use in the heap, in an undefined order.
 158   virtual void space_iterate(SpaceClosure* cl) = 0;
 159 
 160   // A SharedHeap will contain some number of spaces.  This finds the
 161   // space whose reserved area contains the given address, or else returns
 162   // NULL.


 179   // The idea is that objects representing fine-grained tasks, such as
 180   // threads, will contain a "parity" field.  A task is claimed in the
 181   // current "process_roots" call only if its parity field is the
 182   // same as the "strong_roots_parity"; task claiming is accomplished by
 183   // updating the parity field to the strong_roots_parity with a CAS.
 184   //
 185   // If the client meets this spec, then strong_roots_parity() will have
 186   // the following properties:
 187   //   a) to return a different value than was returned before the last
 188   //      call to change_strong_roots_parity, and
 189   //   b) to never return a distinguished value (zero) with which such
 190   //      task-claiming variables may be initialized, to indicate "never
 191   //      claimed".
 192  public:
 193   int strong_roots_parity() { return _strong_roots_parity; }
 194 
 195   // Call these in sequential code around process_roots.
 196   // strong_roots_prologue calls change_strong_roots_parity, if
 197   // parallel tasks are enabled.
 198   class StrongRootsScope : public MarkingCodeBlobClosure::MarkScope {



 199     SharedHeap*   _sh;

 200 
 201    public:
 202     StrongRootsScope(SharedHeap* heap, bool activate = true);






 203   };
 204   friend class StrongRootsScope;
 205 





 206  private:


 207   void change_strong_roots_parity();
 208 
 209  public:






 210   FlexibleWorkGang* workers() const { return _workers; }
 211 



























 212   // The functions below are helper functions that a subclass of
 213   // "SharedHeap" can use in the implementation of its virtual
 214   // functions.
 215 
 216 public:
 217 
 218   // Do anything common to GC's.
 219   virtual void gc_prologue(bool full) = 0;
 220   virtual void gc_epilogue(bool full) = 0;
 221 
 222   // Sets the number of parallel threads that will be doing tasks
 223   // (such as process roots) subsequently.
 224   virtual void set_par_threads(uint t);
 225 



 226   //
 227   // New methods from CollectedHeap
 228   //
 229 
 230   // Some utilities.
 231   void print_size_transition(outputStream* out,
 232                              size_t bytes_before,
 233                              size_t bytes_after,
 234                              size_t capacity);
 235 };




 236 
 237 #endif // SHARE_VM_MEMORY_SHAREDHEAP_HPP