/*
 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PARALLELSCAVENGEHEAP_HPP
#define SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PARALLELSCAVENGEHEAP_HPP

#include "gc_implementation/parallelScavenge/generationSizer.hpp"
#include "gc_implementation/parallelScavenge/objectStartArray.hpp"
#include "gc_implementation/parallelScavenge/psGCAdaptivePolicyCounters.hpp"
#include "gc_implementation/parallelScavenge/psOldGen.hpp"
#include "gc_implementation/parallelScavenge/psYoungGen.hpp"
#include "gc_implementation/shared/gcPolicyCounters.hpp"
#include "gc_implementation/shared/gcWhen.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "memory/collectorPolicy.hpp"
#include "memory/strongRootsScope.hpp"
#include "utilities/ostream.hpp"

class AdjoiningGenerations;
class GCHeapSummary;
class GCTaskManager;
class PSAdaptiveSizePolicy;
class PSHeapSummary;

class ParallelScavengeHeap : public CollectedHeap {
  friend class VMStructs;
 private:
  static PSYoungGen* _young_gen;
  static PSOldGen*   _old_gen;

  // Sizing policy for entire heap
  static PSAdaptiveSizePolicy*       _size_policy;
  static PSGCAdaptivePolicyCounters* _gc_policy_counters;

  static ParallelScavengeHeap* _psh;

  GenerationSizer* _collector_policy;

  // Collection of generations that are adjacent in the
  // space reserved for the heap.
  AdjoiningGenerations* _gens;
  unsigned int _death_march_count;

  // The task manager
  static GCTaskManager* _gc_task_manager;

  void trace_heap(GCWhen::Type when, const GCTracer* tracer);

 protected:
  static inline size_t total_invocations();
  HeapWord* allocate_new_tlab(size_t size);

  inline bool should_alloc_in_eden(size_t size) const;
  inline void death_march_check(HeapWord* const result, size_t size);
  HeapWord* mem_allocate_old_gen(size_t size);

 public:
  ParallelScavengeHeap() : CollectedHeap(), _death_march_count(0) { }

  // For use by VM operations
  enum CollectionType {
    Scavenge,
    MarkSweep
  };

  virtual Name kind() const {
    return CollectedHeap::ParallelScavengeHeap;
  }

  virtual CollectorPolicy* collector_policy() const { return (CollectorPolicy*) _collector_policy; }

  static PSYoungGen* young_gen() { return _young_gen; }
  static PSOldGen* old_gen()     { return _old_gen; }

  virtual PSAdaptiveSizePolicy* size_policy() { return _size_policy; }

  static PSGCAdaptivePolicyCounters* gc_policy_counters() { return _gc_policy_counters; }

  static ParallelScavengeHeap* heap();

  static GCTaskManager* const gc_task_manager() { return _gc_task_manager; }

  AdjoiningGenerations* gens() { return _gens; }

  // Returns JNI_OK on success
  virtual jint initialize();

  void post_initialize();
  void update_counters();

  // The alignment used for the various areas
  size_t space_alignment()      { return _collector_policy->space_alignment(); }
  size_t generation_alignment() { return _collector_policy->gen_alignment(); }

  // Return the (conservative) maximum heap alignment
  static size_t conservative_max_heap_alignment() {
    return CollectorPolicy::compute_heap_alignment();
  }

  size_t capacity() const;
  size_t used() const;

  // Return "true" if all generations have reached the
  // maximal committed limit that they can reach, without a garbage
  // collection.
  virtual bool is_maximal_no_gc() const;

  // Return true if the reference points to an object that
  // can be moved in a partial collection. For currently implemented
  // generational collectors that means during a collection of
  // the young gen.
  virtual bool is_scavengable(const void* addr);

  size_t max_capacity() const;

  // Whether p is in the allocated part of the heap
  bool is_in(const void* p) const;

  bool is_in_reserved(const void* p) const;

  bool is_in_young(oop p);  // reserved part
  bool is_in_old(oop p);    // reserved part

  // Memory allocation. "gc_overhead_limit_was_exceeded" will
  // be set to true if the adaptive size policy determines that
  // an excessive amount of time is being spent doing collections
  // and caused a NULL to be returned. If a NULL is not returned,
  // "gc_overhead_limit_was_exceeded" has an undefined meaning.
  HeapWord* mem_allocate(size_t size, bool* gc_overhead_limit_was_exceeded);

  // Allocation attempt(s) during a safepoint. It should never be called
  // to allocate a new TLAB as this allocation might be satisfied out
  // of the old generation.
  HeapWord* failed_mem_allocate(size_t size);

  // Support for System.gc()
  void collect(GCCause::Cause cause);

  // These also should be called by the vm thread at a safepoint (e.g., from a
  // VM operation).
  //
  // The first collects the young generation only, unless the scavenge fails; it
  // will then attempt a full gc. The second collects the entire heap; if
  // maximum_compaction is true, it will compact everything and clear all soft
  // references.
  inline void invoke_scavenge();

  // Perform a full collection
  virtual void do_full_collection(bool clear_all_soft_refs);

  bool supports_inline_contig_alloc() const { return !UseNUMA; }

  HeapWord** top_addr() const { return !UseNUMA ? young_gen()->top_addr() : (HeapWord**)-1; }
  HeapWord** end_addr() const { return !UseNUMA ? young_gen()->end_addr() : (HeapWord**)-1; }

  void ensure_parsability(bool retire_tlabs);
  void accumulate_statistics_all_tlabs();
  void resize_all_tlabs();

  bool supports_tlab_allocation() const { return true; }

  size_t tlab_capacity(Thread* thr) const;
  size_t tlab_used(Thread* thr) const;
  size_t unsafe_max_tlab_alloc(Thread* thr) const;

  // Can a compiler initialize a new object without store barriers?
  // This permission only extends from the creation of a new object
  // via a TLAB up to the first subsequent safepoint.
  virtual bool can_elide_tlab_store_barriers() const {
    return true;
  }

  virtual bool card_mark_must_follow_store() const {
    return false;
  }

  // Return true if we don't need a store barrier for
  // initializing stores to an object at this address.
  virtual bool can_elide_initializing_store_barrier(oop new_obj);

  void object_iterate(ObjectClosure* cl);
  void safe_object_iterate(ObjectClosure* cl) { object_iterate(cl); }

  HeapWord* block_start(const void* addr) const;
  size_t block_size(const HeapWord* addr) const;
  bool block_is_obj(const HeapWord* addr) const;

  jlong millis_since_last_gc();

  void prepare_for_verify();
  PSHeapSummary create_ps_heap_summary();
  virtual void print_on(outputStream* st) const;
  virtual void print_on_error(outputStream* st) const;
  virtual void print_gc_threads_on(outputStream* st) const;
  virtual void gc_threads_do(ThreadClosure* tc) const;
  virtual void print_tracing_info() const;

  void verify(bool silent, VerifyOption option /* ignored */);

  void print_heap_change(size_t prev_used);

  // Resize the young generation. The reserved space for the
  // generation may be expanded in preparation for the resize.
  void resize_young_gen(size_t eden_size, size_t survivor_size);

  // Resize the old generation. The reserved space for the
  // generation may be expanded in preparation for the resize.
  void resize_old_gen(size_t desired_free_space);

  // Save the tops of the spaces in all generations
  void record_gen_tops_before_GC() PRODUCT_RETURN;

  // Mangle the unused parts of all spaces in the heap
  void gen_mangle_unused_area() PRODUCT_RETURN;

  // Call these in sequential code around the processing of strong roots.
  class ParStrongRootsScope : public MarkScope {
   public:
    ParStrongRootsScope();
    ~ParStrongRootsScope();
  };
};

#endif // SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PARALLELSCAVENGEHEAP_HPP