/*
 * Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_MEMORY_SHAREDHEAP_HPP
#define SHARE_VM_MEMORY_SHAREDHEAP_HPP

#include "gc_interface/collectedHeap.hpp"
#include "memory/generation.hpp"

// A "SharedHeap" is an implementation of a Java heap for HotSpot.  This
// is an abstract class: there may be many different kinds of heaps.  This
// class defines the functions that a heap must implement, and contains
// infrastructure common to all heaps.

class Generation;
class BarrierSet;
class GenRemSet;
class Space;
class SpaceClosure;
class OopClosure;
class OopsInGenClosure;
class ObjectClosure;
class SubTasksDone;
class WorkGang;
class FlexibleWorkGang;
class CollectorPolicy;
class KlassClosure;
class GCPhaseTimeTracker;

// Note on the use of FlexibleWorkGangs for GC.
// There are three places where task completion is determined:
//    1) ParallelTaskTerminator::offer_termination(), where _n_threads
//    must be set to the correct value so that the count of workers that
//    have offered termination exactly matches the number working on
//    the task.  Tasks such as those derived from GCTask use
//    ParallelTaskTerminators.  Tasks that want load balancing by work
//    stealing use this method to gauge completion.
//    2) SubTasksDone has a variable _n_threads that is used in
//    all_tasks_completed() to determine completion.  all_tasks_completed()
//    counts the number of workers that have finished and then resets
//    the SubTasksDone so that it can be used again.  When the number of
//    tasks is set to the number of GC workers, then _n_threads must
//    be set to the number of active GC workers.  G1CollectedHeap,
//    HRInto_G1RemSet, GenCollectedHeap and SharedHeap have SubTasksDone.
//    This seems like too many.
//    3) SequentialSubTasksDone has an _n_threads that is used in
//    a way similar to SubTasksDone and has the same dependency on the
//    number of active GC workers.  CompactibleFreeListSpace and Space
//    have SequentialSubTasksDones.
//
// Example of using SubTasksDone and SequentialSubTasksDone:
//  G1CollectedHeap::g1_process_roots() calls into
//  SharedHeap::process_roots(), which uses
//  SubTasksDone* _process_strong_tasks to claim tasks.
//  process_roots() calls
//      rem_set()->younger_refs_iterate()
//  to scan the card table, which eventually calls down into
//  CardTableModRefBS::par_non_clean_card_iterate_work().  This method
//  uses SequentialSubTasksDone* _pst to claim tasks.
//  Both SubTasksDone and SequentialSubTasksDone call their method
//  all_tasks_completed() to count the number of GC workers that have
//  finished their work.  The logic is "when all the workers are
//  finished, the tasks are finished".
//
//  The pattern that appears in the code is to set _n_threads
//  to a value > 1 before a task that you would like executed in parallel
//  and then to set it to 0 after that task has completed.  A value of
//  0 is a "special" value in set_n_threads() which translates to
//  setting _n_threads to 1.
//
//  Some code uses _n_termination to decide if work should be done in
//  parallel.  The notorious possibly_parallel_oops_do() in thread.cpp
//  is an example of such code.  Look for the variable "is_par" for other
//  examples.
//
//  active_workers is not reset to 0 after a parallel phase.  Its
//  value may be used in later phases, and in at least one instance
//  (the parallel remark) it has to be used (the parallel remark depends
//  on the partitioning done in the previous parallel scavenge).

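// As a concrete illustration of the SubTasksDone claiming pattern, here is
// a minimal sketch of what a root-scanning method does (the exact set of
// statements in process_roots() may differ; the task ids are from the
// SH_process_roots_tasks enum declared below):
//
//   if (!_process_strong_tasks->is_task_claimed(SH_PS_Universe_oops_do)) {
//     // Only the first worker to claim this subtask scans these roots.
//     Universe::oops_do(strong_roots);
//   }
//   if (!_process_strong_tasks->is_task_claimed(SH_PS_JNIHandles_oops_do)) {
//     JNIHandles::oops_do(strong_roots);
//   }
//   ...
//   // Every worker reports in; the last one resets the SubTasksDone so
//   // that it can be reused by the next parallel phase.
//   _process_strong_tasks->all_tasks_completed();
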
class SharedHeap : public CollectedHeap {
  friend class VMStructs;

  friend class VM_GC_Operation;
  friend class VM_CGC_Operation;
public:
  // The set of potentially parallel tasks in root scanning.
  enum SH_process_roots_tasks {
    SH_PS_Threads_oops_do,
    SH_PS_StringTable_oops_do,
    SH_PS_Universe_oops_do,
    SH_PS_JNIHandles_oops_do,
    SH_PS_ObjectSynchronizer_oops_do,
    SH_PS_FlatProfiler_oops_do,
    SH_PS_Management_oops_do,
    SH_PS_SystemDictionary_oops_do,
    SH_PS_ClassLoaderDataGraph_oops_do,
    SH_PS_jvmti_oops_do,
    SH_PS_CodeCache_oops_do,
    // Leave this one last.
    SH_PS_NumElements
  };

  static const char* ext_roots_task_str(uint task);
private:
  // For claiming strong_roots tasks.
  SubTasksDone* _process_strong_tasks;

protected:
  // There should be only a single instance of "SharedHeap" in a program.
  // This is enforced with the protected constructor below, which will also
  // set the static pointer "_sh" to that instance.
  static SharedHeap* _sh;

  // A GC policy; controls global GC resource issues.
  CollectorPolicy* _collector_policy;

  // See the discussion below, in the specification of the reader function
  // for this variable.
  int _strong_roots_parity;

  // If we're doing parallel GC, use this gang of threads.
  FlexibleWorkGang* _workers;

  // Full initialization is done in a concrete subtype's "initialize"
  // function.
  SharedHeap(CollectorPolicy* policy_);

  // Returns true if the calling thread holds the heap_lock,
  // or the calling thread is a par gc thread and the heap_lock is held
  // by the VM thread doing a gc operation.
  bool heap_lock_held_for_gc();
  // True if the heap_lock is held by a non-gc thread invoking a gc
  // operation.
  bool _thread_holds_heap_lock_for_gc;

public:
  static SharedHeap* heap() { return _sh; }

  void set_barrier_set(BarrierSet* bs);
  SubTasksDone* process_strong_tasks() { return _process_strong_tasks; }

  // Does operations required after initialization has been done.
  virtual void post_initialize();

  // Initialization of ("weak") reference processing support.
  virtual void ref_processing_init();

  // Iteration functions.
  virtual void oop_iterate(ExtendedOopClosure* cl) = 0;

  // Iterate over all spaces in use in the heap, in an undefined order.
  virtual void space_iterate(SpaceClosure* cl) = 0;

  // A SharedHeap will contain some number of spaces.  This finds the
  // space whose reserved area contains the given address, or else returns
  // NULL.
  virtual Space* space_containing(const void* addr) const = 0;

  bool no_gc_in_progress() { return !is_gc_active(); }

  // Some collectors will perform "process_strong_roots" in parallel.
  // Such a call will involve claiming some fine-grained tasks, such as
  // scanning of threads.  To make this process simpler, we provide the
  // "strong_roots_parity()" method.  Collectors that start parallel tasks
  // whose threads invoke "process_strong_roots" must
  // call "change_strong_roots_parity" in sequential code starting such a
  // task.  (This also means that a parallel thread may only call
  // process_strong_roots once.)
  //
  // For calls to process_roots by sequential code, the parity is
  // updated automatically.
  //
  // The idea is that objects representing fine-grained tasks, such as
  // threads, will contain a "parity" field.  A task is considered claimed
  // in the current "process_roots" call only if its parity field is the
  // same as the "strong_roots_parity"; task claiming is accomplished by
  // updating the parity field to the strong_roots_parity with a CAS.
  //
  // If the client meets this spec, then strong_roots_parity() will have
  // the following properties:
  //   a) to return a different value than was returned before the last
  //      call to change_strong_roots_parity, and
  //   b) to never return a distinguished value (zero) with which such
  //      task-claiming variables may be initialized, to indicate "never
  //      claimed".
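  //
  // A minimal sketch of that claiming CAS (modeled on what a fine-grained
  // task object does; the field name here is illustrative):
  //
  //   jint thread_parity = _oops_do_parity;
  //   if (thread_parity != strong_roots_parity) {
  //     // Race to bump our parity to the current one; the winner has
  //     // claimed this task for the current scan.
  //     jint res = Atomic::cmpxchg(strong_roots_parity,
  //                                &_oops_do_parity, thread_parity);
  //     if (res == thread_parity) {
  //       return true;   // claimed
  //     }
  //   }
  //   return false;      // already claimed (or lost the race)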
 public:
  int strong_roots_parity() { return _strong_roots_parity; }

  // Call these in sequential code around process_roots.
  // strong_roots_prologue calls change_strong_roots_parity, if
  // parallel tasks are enabled.
  class StrongRootsScope : public MarkingCodeBlobClosure::MarkScope {
    // Used to implement the Thread work barrier.
    static Monitor* _lock;

    SharedHeap*   _sh;
    volatile jint _n_workers_done_with_threads;

   public:
    StrongRootsScope(SharedHeap* heap, bool activate = true);
    ~StrongRootsScope();

    // Mark that this thread is done with the Threads work.
    void mark_worker_done_with_threads(uint n_workers);
    // Wait until all n_workers are done with the Threads work.
    void wait_until_all_workers_done_with_threads(uint n_workers);
  };
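
  // A sketch of how a parallel worker might use the barrier above (the
  // scope itself is normally entered by process_roots(); "n_workers" is
  // the number of active GC workers):
  //
  //   StrongRootsScope* srs = _sh->active_strong_roots_scope();
  //   ...                                   // scan our share of the roots
  //   srs->mark_worker_done_with_threads(n_workers);
  //   // Block until every thread stack has been scanned before touching
  //   // anything that depends on that work:
  //   srs->wait_until_all_workers_done_with_threads(n_workers);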
  friend class StrongRootsScope;

  // The currently active StrongRootsScope, if any.
  StrongRootsScope* _strong_roots_scope;

  StrongRootsScope* active_strong_roots_scope() const;

 private:
  void register_strong_roots_scope(StrongRootsScope* scope);
  void unregister_strong_roots_scope(StrongRootsScope* scope);
  void change_strong_roots_parity();

 public:
  enum ScanningOption {
    SO_None                =  0x0,
    SO_AllCodeCache        =  0x8,
    SO_ScavengeCodeCache   = 0x10
  };

  FlexibleWorkGang* workers() const { return _workers; }

  // Invoke the "do_oop" method of the closure "roots" on all root locations.
  // The "so" argument determines which roots the closure is applied to:
  // "SO_None" does none;
  // "SO_AllCodeCache" applies the closure to all elements of the CodeCache;
  // "SO_ScavengeCodeCache" applies the closure to elements on the scavenge root list in the CodeCache.
  void process_roots(bool activate_scope,
                     ScanningOption so,
                     OopClosure* strong_roots,
                     OopClosure* weak_roots,
                     CLDClosure* strong_cld_closure,
                     CLDClosure* weak_cld_closure,
                     CodeBlobClosure* code_roots,
                     GCPhaseTimeTracker* phase_durations = NULL);
  void process_all_roots(bool activate_scope,
                         ScanningOption so,
                         OopClosure* roots,
                         CLDClosure* cld_closure,
                         CodeBlobClosure* code_roots,
                         GCPhaseTimeTracker* phase_durations = NULL);
  void process_strong_roots(bool activate_scope,
                            ScanningOption so,
                            OopClosure* roots,
                            CLDClosure* cld_closure,
                            CodeBlobClosure* code_roots,
                            GCPhaseTimeTracker* phase_durations = NULL);

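  // A sketch of a typical invocation during a parallel scan (the closure
  // names are hypothetical; each concrete collector supplies its own):
  //
  //   process_roots(false,                      // scope already active
  //                 SharedHeap::SO_ScavengeCodeCache,
  //                 &strong_oop_closure,        // strong roots
  //                 &weak_oop_closure,          // weak roots
  //                 &strong_cld_closure,        // strong class loader data
  //                 &weak_cld_closure,          // weak class loader data
  //                 &code_blob_closure);        // code roots
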
  // Apply "root_closure" to the JNI weak roots.
  void process_weak_roots(OopClosure* root_closure);

  // The functions below are helper functions that a subclass of
  // "SharedHeap" can use in the implementation of its virtual
  // functions.

public:

  // Do anything common to GCs.
  virtual void gc_prologue(bool full) = 0;
  virtual void gc_epilogue(bool full) = 0;

  // Sets the number of parallel threads that will be doing tasks
  // (such as process roots) subsequently.
  virtual void set_par_threads(uint t);

  int n_termination();
  void set_n_termination(int t);

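  // The usual pattern around dispatching a parallel task looks roughly
  // like this (a sketch; "some_gc_task" is a hypothetical AbstractGangTask):
  //
  //   uint n_workers = workers()->active_workers();
  //   set_par_threads(n_workers);          // size SubTasksDone counts, etc.
  //   workers()->run_task(&some_gc_task);  // execute on the work gang
  //   set_par_threads(0);                  // 0 is the "sequential" special value
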
  //
  // New methods from CollectedHeap
  //

  // Some utilities.
  void print_size_transition(outputStream* out,
                             size_t bytes_before,
                             size_t bytes_after,
                             size_t capacity);
};

inline SharedHeap::ScanningOption operator|(SharedHeap::ScanningOption so0, SharedHeap::ScanningOption so1) {
  return static_cast<SharedHeap::ScanningOption>(static_cast<int>(so0) | static_cast<int>(so1));
}
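
// For example, scanning options can be combined without casts at the
// call site (a usage sketch):
//
//   SharedHeap::ScanningOption so =
//       SharedHeap::SO_None | SharedHeap::SO_ScavengeCodeCache;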

#endif // SHARE_VM_MEMORY_SHAREDHEAP_HPP