1 /* 2 * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
 *
 */

#ifndef SHARE_GC_PARALLEL_PARALLELSCAVENGEHEAP_HPP
#define SHARE_GC_PARALLEL_PARALLELSCAVENGEHEAP_HPP

#include "gc/parallel/objectStartArray.hpp"
#include "gc/parallel/psGCAdaptivePolicyCounters.hpp"
#include "gc/parallel/psOldGen.hpp"
#include "gc/parallel/psYoungGen.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/gcPolicyCounters.hpp"
#include "gc/shared/gcWhen.hpp"
#include "gc/shared/preGCValues.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "gc/shared/softRefPolicy.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/workgroup.hpp"
#include "logging/log.hpp"
#include "memory/metaspace.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/ostream.hpp"

class AdjoiningGenerations;
class GCHeapSummary;
class MemoryManager;
class MemoryPool;
class PSAdaptiveSizePolicy;
class PSCardTable;
class PSHeapSummary;

// CollectedHeap implementation for the Parallel collector: a generational
// heap made up of a scavenged young generation (eden + two survivor spaces)
// and a mark-compacted old generation, with heap sizing driven by
// PSAdaptiveSizePolicy.
class ParallelScavengeHeap : public CollectedHeap {
  friend class VMStructs;
 private:
  // The generations; static because there is a single ParallelScavengeHeap
  // instance per VM (see heap()).
  static PSYoungGen* _young_gen;
  static PSOldGen* _old_gen;

  // Sizing policy for entire heap
  static PSAdaptiveSizePolicy* _size_policy;
  static PSGCAdaptivePolicyCounters* _gc_policy_counters;

  SoftRefPolicy _soft_ref_policy;

  // Counts consecutive allocations that had to fall back to the old gen;
  // maintained by death_march_check()/mem_allocate.
  unsigned int _death_march_count;

  // Memory managers and pools exposed for the management/serviceability API
  // (set up in initialize_serviceability()).
  GCMemoryManager* _young_manager;
  GCMemoryManager* _old_manager;

  MemoryPool* _eden_pool;
  MemoryPool* _survivor_pool;
  MemoryPool* _old_pool;

  // The gang of parallel GC worker threads owned by this heap.
  WorkGang _workers;

  virtual void initialize_serviceability();

  // Report a heap summary to the given tracer before/after a collection.
  void trace_heap(GCWhen::Type when, const GCTracer* tracer);

 protected:
  static inline size_t total_invocations();
  HeapWord* allocate_new_tlab(size_t min_size, size_t requested_size, size_t* actual_size);

  inline bool should_alloc_in_eden(size_t size) const;
  inline void death_march_check(HeapWord* const result, size_t size);
  // Slow-path allocation directly out of the old generation.
  HeapWord* mem_allocate_old_gen(size_t size);

 public:
  ParallelScavengeHeap() :
    CollectedHeap(),
    _death_march_count(0),
    _young_manager(NULL),
    _old_manager(NULL),
    _eden_pool(NULL),
    _survivor_pool(NULL),
    _old_pool(NULL),
    _workers("GC Thread",
             ParallelGCThreads,
             true /* are_GC_task_threads */,
             false /* are_ConcurrentGC_threads */) { }

  // For use by VM operations
  enum CollectionType {
    Scavenge,
    MarkSweep
  };

  virtual Name kind() const {
    return CollectedHeap::Parallel;
  }

  virtual const char* name() const {
    return "Parallel";
  }

  virtual SoftRefPolicy* soft_ref_policy() { return &_soft_ref_policy; }

  virtual GrowableArray<GCMemoryManager*> memory_managers();
  virtual GrowableArray<MemoryPool*> memory_pools();

  static PSYoungGen* young_gen() { return _young_gen; }
  static PSOldGen* old_gen() { return _old_gen; }

  virtual PSAdaptiveSizePolicy* size_policy() { return _size_policy; }

  static PSGCAdaptivePolicyCounters* gc_policy_counters() { return _gc_policy_counters; }

  // Accessor for the singleton heap instance.
  static ParallelScavengeHeap* heap();

  CardTableBarrierSet* barrier_set();
  PSCardTable* card_table();

  // Returns JNI_OK on success
  virtual jint initialize();

  void post_initialize();
  void update_counters();

  size_t capacity() const;
  size_t used() const;

  // Return "true" if all generations have reached the
  // maximal committed limit that they can reach, without a garbage
  // collection.
  virtual bool is_maximal_no_gc() const;

  // nmethod registration for scavengable-root tracking.
  virtual void register_nmethod(nmethod* nm);
  virtual void unregister_nmethod(nmethod* nm);
  virtual void verify_nmethod(nmethod* nm);
  virtual void flush_nmethod(nmethod* nm);

  void prune_scavengable_nmethods();

  size_t max_capacity() const;

  // Whether p is in the allocated part of the heap
  bool is_in(const void* p) const;

  bool is_in_reserved(const void* p) const;

  bool is_in_young(oop p);  // reserved part
  bool is_in_old(oop p);    // reserved part

  MemRegion reserved_region() const { return _reserved; }
  HeapWord* base() const { return _reserved.start(); }

  // Memory allocation. "gc_overhead_limit_was_exceeded" will
  // be set to true if the adaptive size policy determine that
  // an excessive amount of time is being spent doing collections
  // and caused a NULL to be returned. If a NULL is not returned,
  // "gc_overhead_limit_was_exceeded" has an undefined meaning.
  HeapWord* mem_allocate(size_t size, bool* gc_overhead_limit_was_exceeded);

  // Allocation attempt(s) during a safepoint. It should never be called
  // to allocate a new TLAB as this allocation might be satisfied out
  // of the old generation.
  HeapWord* failed_mem_allocate(size_t size);

  // Support for System.gc()
  void collect(GCCause::Cause cause);

  // These also should be called by the vm thread at a safepoint (e.g., from a
  // VM operation).
  //
  // The first collects the young generation only, unless the scavenge fails; it
  // will then attempt a full gc. The second collects the entire heap; if
  // maximum_compaction is true, it will compact everything and clear all soft
  // references.
  inline void invoke_scavenge();

  // Perform a full collection
  virtual void do_full_collection(bool clear_all_soft_refs);

  // Inline contiguous allocation is only supported when NUMA interleaving
  // of the young gen is not in use (otherwise there is no single top/end).
  bool supports_inline_contig_alloc() const { return !UseNUMA; }

  HeapWord* volatile* top_addr() const { return !UseNUMA ? young_gen()->top_addr() : (HeapWord* volatile*)-1; }
  HeapWord** end_addr() const { return !UseNUMA ? young_gen()->end_addr() : (HeapWord**)-1; }

  void ensure_parsability(bool retire_tlabs);
  void resize_all_tlabs();

  bool supports_tlab_allocation() const { return true; }

  size_t tlab_capacity(Thread* thr) const;
  size_t tlab_used(Thread* thr) const;
  size_t unsafe_max_tlab_alloc(Thread* thr) const;

  void object_iterate(ObjectClosure* cl);

  // Block (object) boundary queries within the heap.
  HeapWord* block_start(const void* addr) const;
  bool block_is_obj(const HeapWord* addr) const;

  jlong millis_since_last_gc();

  void prepare_for_verify();
  PSHeapSummary create_ps_heap_summary();
  virtual void print_on(outputStream* st) const;
  virtual void print_on_error(outputStream* st) const;
  virtual void print_gc_threads_on(outputStream* st) const;
  virtual void gc_threads_do(ThreadClosure* tc) const;
  virtual void print_tracing_info() const;

  virtual WorkGang* get_safepoint_workers() { return &_workers; }

  PreGenGCValues get_pre_gc_values() const;
  void print_heap_change(const PreGenGCValues& pre_gc_values) const;

  // Used to print information about locations in the hs_err file.
  virtual bool print_location(outputStream* st, void* addr) const;

  void verify(VerifyOption option /* ignored */);

  // Resize the young generation. The reserved space for the
  // generation may be expanded in preparation for the resize.
  void resize_young_gen(size_t eden_size, size_t survivor_size);

  // Resize the old generation. The reserved space for the
  // generation may be expanded in preparation for the resize.
  void resize_old_gen(size_t desired_free_space);

  // Save the tops of the spaces in all generations
  void record_gen_tops_before_GC() PRODUCT_RETURN;

  // Mangle the unused parts of all spaces in the heap
  void gen_mangle_unused_area() PRODUCT_RETURN;

  // Call these in sequential code around the processing of strong roots.
  class ParStrongRootsScope : public MarkScope {
   public:
    ParStrongRootsScope();
    ~ParStrongRootsScope();
  };

  GCMemoryManager* old_gc_manager() const { return _old_manager; }
  GCMemoryManager* young_gc_manager() const { return _young_manager; }

  WorkGang& workers() {
    return _workers;
  }

  // Runs the given AbstractGangTask with the current active workers, returning the
  // total time taken.
  virtual Tickspan run_task(AbstractGangTask* task);
};

// Class that can be used to print information about the
// adaptive size policy at intervals specified by
// AdaptiveSizePolicyOutputInterval. Only print information
// if an adaptive size policy is in use.
class AdaptiveSizePolicyOutput : AllStatic {
  // Printing is enabled only for the Parallel collector with adaptive
  // sizing, and only when gc+ergo debug logging is on.
  static bool enabled() {
    return UseParallelGC &&
           UseAdaptiveSizePolicy &&
           log_is_enabled(Debug, gc, ergo);
  }
 public:
  static void print() {
    if (enabled()) {
      ParallelScavengeHeap::heap()->size_policy()->print();
    }
  }

  // Print every AdaptiveSizePolicyOutputInterval-th invocation (count is
  // typically the GC invocation count).
  static void print(AdaptiveSizePolicy* size_policy, uint count) {
    bool do_print =
      enabled() &&
      (AdaptiveSizePolicyOutputInterval > 0) &&
      (count % AdaptiveSizePolicyOutputInterval) == 0;

    if (do_print) {
      size_policy->print();
    }
  }
};

#endif // SHARE_GC_PARALLEL_PARALLELSCAVENGEHEAP_HPP