1 /* 2 * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #ifndef SHARE_VM_MEMORY_SHAREDHEAP_HPP 26 #define SHARE_VM_MEMORY_SHAREDHEAP_HPP 27 28 #include "gc_interface/collectedHeap.hpp" 29 #include "memory/generation.hpp" 30 31 // A "SharedHeap" is an implementation of a java heap for HotSpot. This 32 // is an abstract class: there may be many different kinds of heaps. This 33 // class defines the functions that a heap must implement, and contains 34 // infrastructure common to all heaps. 35 36 class Generation; 37 class BarrierSet; 38 class GenRemSet; 39 class Space; 40 class SpaceClosure; 41 class OopClosure; 42 class OopsInGenClosure; 43 class ObjectClosure; 44 class SubTasksDone; 45 class WorkGang; 46 class FlexibleWorkGang; 47 class CollectorPolicy; 48 class KlassClosure; 49 50 // Note on use of FlexibleWorkGang's for GC. 51 // There are three places where task completion is determined. 
// In
// 1) ParallelTaskTerminator::offer_termination() where _n_threads
//    must be set to the correct value so that the count of workers that
//    have offered termination will exactly match the number
//    working on the task.  Tasks such as those derived from GCTask
//    use ParallelTaskTerminator's.  Tasks that want load balancing
//    by work stealing use this method to gauge completion.
// 2) SubTasksDone has a variable _n_threads that is used in
//    all_tasks_completed() to determine completion.  all_tasks_completed()
//    counts the number of tasks that have been done and then resets
//    the SubTasksDone so that it can be used again.  When the number of
//    tasks is set to the number of GC workers, then _n_threads must
//    be set to the number of active GC workers.  G1RootProcessor and
//    GenCollectedHeap have SubTasksDone.
// 3) SequentialSubTasksDone has an _n_threads that is used in
//    a way similar to SubTasksDone and has the same dependency on the
//    number of active GC workers.  CompactibleFreeListSpace and Space
//    have SequentialSubTasksDone's.
//
// Examples of using SubTasksDone and SequentialSubTasksDone:
//  G1RootProcessor and GenCollectedHeap::process_roots() use
//    SubTasksDone* _process_strong_tasks to claim tasks for workers
//
//  GenCollectedHeap::gen_process_roots() calls
//      rem_set()->younger_refs_iterate()
//    to scan the card table and which eventually calls down into
//    CardTableModRefBS::par_non_clean_card_iterate_work().  This method
//    uses SequentialSubTasksDone* _pst to claim tasks.
//    Both SubTasksDone and SequentialSubTasksDone call their method
//    all_tasks_completed() to count the number of GC workers that have
//    finished their work.  That logic is "when all the workers are
//    finished the tasks are finished".
84 // 85 // The pattern that appears in the code is to set _n_threads 86 // to a value > 1 before a task that you would like executed in parallel 87 // and then to set it to 0 after that task has completed. A value of 88 // 0 is a "special" value in set_n_threads() which translates to 89 // setting _n_threads to 1. 90 // 91 // Some code uses _n_termination to decide if work should be done in 92 // parallel. The notorious possibly_parallel_oops_do() in threads.cpp 93 // is an example of such code. Look for variable "is_par" for other 94 // examples. 95 // 96 // The active_workers is not reset to 0 after a parallel phase. It's 97 // value may be used in later phases and in one instance at least 98 // (the parallel remark) it has to be used (the parallel remark depends 99 // on the partitioning done in the previous parallel scavenge). 100 101 class SharedHeap : public CollectedHeap { 102 friend class VMStructs; 103 104 friend class VM_GC_Operation; 105 friend class VM_CGC_Operation; 106 107 protected: 108 // If we're doing parallel GC, use this gang of threads. 109 FlexibleWorkGang* _workers; 110 111 // Full initialization is done in a concrete subtype's "initialize" 112 // function. 113 SharedHeap(); 114 115 public: 116 // Iteration functions. 117 void oop_iterate(ExtendedOopClosure* cl) = 0; 118 119 // A SharedHeap will contain some number of spaces. This finds the 120 // space whose reserved area contains the given address, or else returns 121 // NULL. 122 virtual Space* space_containing(const void* addr) const = 0; 123 124 // Note, the below comment needs to be updated to reflect the changes 125 // introduced by JDK-8076225. This should be done as part of JDK-8076289. 126 // 127 //Some collectors will perform "process_strong_roots" in parallel. 128 // Such a call will involve claiming some fine-grained tasks, such as 129 // scanning of threads. To make this process simpler, we provide the 130 // "strong_roots_parity()" method. 
Collectors that start parallel tasks 131 // whose threads invoke "process_strong_roots" must 132 // call "change_strong_roots_parity" in sequential code starting such a 133 // task. (This also means that a parallel thread may only call 134 // process_strong_roots once.) 135 // 136 // For calls to process_roots by sequential code, the parity is 137 // updated automatically. 138 // 139 // The idea is that objects representing fine-grained tasks, such as 140 // threads, will contain a "parity" field. A task will is claimed in the 141 // current "process_roots" call only if its parity field is the 142 // same as the "strong_roots_parity"; task claiming is accomplished by 143 // updating the parity field to the strong_roots_parity with a CAS. 144 // 145 // If the client meats this spec, then strong_roots_parity() will have 146 // the following properties: 147 // a) to return a different value than was returned before the last 148 // call to change_strong_roots_parity, and 149 // c) to never return a distinguished value (zero) with which such 150 // task-claiming variables may be initialized, to indicate "never 151 // claimed". 152 public: 153 154 // Call these in sequential code around process_roots. 155 // strong_roots_prologue calls change_strong_roots_parity, if 156 // parallel tasks are enabled. 157 class StrongRootsScope : public MarkingCodeBlobClosure::MarkScope { 158 SharedHeap* _sh; 159 160 public: 161 StrongRootsScope(SharedHeap* heap, bool activate = true); 162 ~StrongRootsScope(); 163 }; 164 165 private: 166 167 public: 168 FlexibleWorkGang* workers() const { return _workers; } 169 170 // The functions below are helper functions that a subclass of 171 // "SharedHeap" can use in the implementation of its virtual 172 // functions. 173 174 public: 175 // Sets the number of parallel threads that will be doing tasks 176 // (such as process roots) subsequently. 177 virtual void set_par_threads(uint t); 178 }; 179 180 #endif // SHARE_VM_MEMORY_SHAREDHEAP_HPP