1 /*
   2  * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_VM_MEMORY_SHAREDHEAP_HPP
  26 #define SHARE_VM_MEMORY_SHAREDHEAP_HPP
  27 
  28 #include "gc_interface/collectedHeap.hpp"
  29 
  30 // A "SharedHeap" is an implementation of a java heap for HotSpot.  This
  31 // is an abstract class: there may be many different kinds of heaps.  This
  32 // class defines the functions that a heap must implement, and contains
  33 // infrastructure common to all heaps.
  34 
  35 class FlexibleWorkGang;
  36 
  37 // Note on use of FlexibleWorkGang's for GC.
  38 // There are three places where task completion is determined.
  39 // In
  40 //    1) ParallelTaskTerminator::offer_termination() where _n_threads
  41 //    must be set to the correct value so that count of workers that
  42 //    have offered termination will exactly match the number
  43 //    working on the task.  Tasks such as those derived from GCTask
  44 //    use ParallelTaskTerminator's.  Tasks that want load balancing
  45 //    by work stealing use this method to gauge completion.
//    2) SubTasksDone has a variable _n_threads that is used in
//    all_tasks_completed() to determine completion.  all_tasks_completed()
//    counts the number of tasks that have been done and then resets
//    the SubTasksDone so that it can be used again.  When the number of
  50 //    tasks is set to the number of GC workers, then _n_threads must
  51 //    be set to the number of active GC workers. G1RootProcessor and
  52 //    GenCollectedHeap have SubTasksDone.
  53 //    3) SequentialSubTasksDone has an _n_threads that is used in
  54 //    a way similar to SubTasksDone and has the same dependency on the
  55 //    number of active GC workers.  CompactibleFreeListSpace and Space
  56 //    have SequentialSubTasksDone's.
  57 //
  58 // Examples of using SubTasksDone and SequentialSubTasksDone:
  59 //  G1RootProcessor and GenCollectedHeap::process_roots() use
  60 //  SubTasksDone* _process_strong_tasks to claim tasks for workers
  61 //
  62 //  GenCollectedHeap::gen_process_roots() calls
  63 //      rem_set()->younger_refs_iterate()
  64 //  to scan the card table and which eventually calls down into
  65 //  CardTableModRefBS::par_non_clean_card_iterate_work().  This method
  66 //  uses SequentialSubTasksDone* _pst to claim tasks.
  67 //  Both SubTasksDone and SequentialSubTasksDone call their method
  68 //  all_tasks_completed() to count the number of GC workers that have
  69 //  finished their work.  That logic is "when all the workers are
  70 //  finished the tasks are finished".
  71 //
//  The pattern that appears in the code is to set _n_threads
  73 //  to a value > 1 before a task that you would like executed in parallel
  74 //  and then to set it to 0 after that task has completed.  A value of
  75 //  0 is a "special" value in set_n_threads() which translates to
  76 //  setting _n_threads to 1.
  77 //
  78 //  Some code uses _n_termination to decide if work should be done in
  79 //  parallel.  The notorious possibly_parallel_oops_do() in threads.cpp
  80 //  is an example of such code.  Look for variable "is_par" for other
  81 //  examples.
  82 //
//  The active_workers is not reset to 0 after a parallel phase.  Its
//  value may be used in later phases and in one instance at least
  85 //  (the parallel remark) it has to be used (the parallel remark depends
  86 //  on the partitioning done in the previous parallel scavenge).
  87 
class SharedHeap : public CollectedHeap {
  friend class VMStructs;

protected:
  // If we're doing parallel GC, use this gang of threads.
  FlexibleWorkGang* _workers;

  // Full initialization is done in a concrete subtype's "initialize"
  // function.
  SharedHeap();

public:
  // Note, the below comment needs to be updated to reflect the changes
  // introduced by JDK-8076225. This should be done as part of JDK-8076289.
  //
  // Some collectors will perform "process_strong_roots" in parallel.
  // Such a call will involve claiming some fine-grained tasks, such as
  // scanning of threads.  To make this process simpler, we provide the
  // "strong_roots_parity()" method.  Collectors that start parallel tasks
  // whose threads invoke "process_strong_roots" must
  // call "change_strong_roots_parity" in sequential code starting such a
  // task.  (This also means that a parallel thread may only call
  // process_strong_roots once.)
  //
  // For calls to process_roots by sequential code, the parity is
  // updated automatically.
  //
  // The idea is that objects representing fine-grained tasks, such as
  // threads, will contain a "parity" field.  A task is claimed in the
  // current "process_roots" call only if its parity field is the
  // same as the "strong_roots_parity"; task claiming is accomplished by
  // updating the parity field to the strong_roots_parity with a CAS.
  //
  // If the client meets this spec, then strong_roots_parity() will have
  // the following properties:
  //   a) to return a different value than was returned before the last
  //      call to change_strong_roots_parity, and
  //   b) to never return a distinguished value (zero) with which such
  //      task-claiming variables may be initialized, to indicate "never
  //      claimed".
 public:

  // Call these in sequential code around process_roots.
  // strong_roots_prologue calls change_strong_roots_parity, if
  // parallel tasks are enabled.
  class StrongRootsScope : public MarkingCodeBlobClosure::MarkScope {
    SharedHeap*   _sh;  // the heap whose strong-roots traversal this scope brackets

   public:
    StrongRootsScope(SharedHeap* heap, bool activate = true);
    ~StrongRootsScope();
  };

 private:

 public:
  // Accessor for the parallel GC worker gang (see _workers above).
  FlexibleWorkGang* workers() const { return _workers; }

  // The functions below are helper functions that a subclass of
  // "SharedHeap" can use in the implementation of its virtual
  // functions.

public:
  // Sets the number of parallel threads that will be doing tasks
  // (such as process roots) subsequently.
  virtual void set_par_threads(uint t);
};
 155 
 156 #endif // SHARE_VM_MEMORY_SHAREDHEAP_HPP