/*
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PARALLELSCAVENGEHEAP_HPP
#define SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PARALLELSCAVENGEHEAP_HPP

#include "gc_implementation/parallelScavenge/objectStartArray.hpp"
#include "gc_implementation/parallelScavenge/psGCAdaptivePolicyCounters.hpp"
#include "gc_implementation/parallelScavenge/psOldGen.hpp"
#include "gc_implementation/parallelScavenge/psYoungGen.hpp"
#include "gc_implementation/shared/gcPolicyCounters.hpp"
#include "gc_implementation/shared/gcWhen.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "utilities/ostream.hpp"

class AdjoiningGenerations;
class GCHeapSummary;
class GCTaskManager;
class GenerationSizer;
class CollectorPolicy;
class PSAdaptiveSizePolicy;
class PSHeapSummary;
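
// ParallelScavengeHeap is the CollectedHeap implementation used by the
// throughput collector (UseParallelGC).  It manages a PSYoungGen and a
// PSOldGen that lie next to each other in the reserved heap space (see
// AdjoiningGenerations) and are resized by a PSAdaptiveSizePolicy.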
class ParallelScavengeHeap : public CollectedHeap {
  friend class VMStructs;
 private:
  static PSYoungGen* _young_gen;
  static PSOldGen*   _old_gen;

  // Sizing policy for entire heap
  static PSAdaptiveSizePolicy*       _size_policy;
  static PSGCAdaptivePolicyCounters* _gc_policy_counters;

  static ParallelScavengeHeap* _psh;

  size_t _young_gen_alignment;
  size_t _old_gen_alignment;

  GenerationSizer* _collector_policy;

  // Rounds val up to a multiple of intra_heap_alignment() and stores it in
  // var (see the inline definition at the end of this file).
  inline size_t set_alignment(size_t& var, size_t val);

  // Collection of generations that are adjacent in the
  // space reserved for the heap.
  AdjoiningGenerations* _gens;
  unsigned int _death_march_count;

  // The task manager
  static GCTaskManager* _gc_task_manager;

  void trace_heap(GCWhen::Type when, GCTracer* tracer);

 protected:
  static inline size_t total_invocations();
  HeapWord* allocate_new_tlab(size_t size);

  inline bool should_alloc_in_eden(size_t size) const;
  inline void death_march_check(HeapWord* const result, size_t size);
  HeapWord* mem_allocate_old_gen(size_t size);

 public:
  ParallelScavengeHeap() : CollectedHeap(), _death_march_count(0) {
    set_alignment(_young_gen_alignment, intra_heap_alignment());
    set_alignment(_old_gen_alignment, intra_heap_alignment());
  }

  // Return the (conservative) maximum heap alignment
  static size_t conservative_max_heap_alignment() {
    return GenCollectorPolicy::intra_heap_alignment();
  }

  // For use by VM operations
  enum CollectionType {
    Scavenge,
    MarkSweep
  };

  ParallelScavengeHeap::Name kind() const {
    return CollectedHeap::ParallelScavengeHeap;
  }

  virtual CollectorPolicy* collector_policy() const { return (CollectorPolicy*) _collector_policy; }

  static PSYoungGen* young_gen() { return _young_gen; }
  static PSOldGen* old_gen()     { return _old_gen; }

  virtual PSAdaptiveSizePolicy* size_policy() { return _size_policy; }

  static PSGCAdaptivePolicyCounters* gc_policy_counters() { return _gc_policy_counters; }

  static ParallelScavengeHeap* heap();

  static GCTaskManager* const gc_task_manager() { return _gc_task_manager; }

  AdjoiningGenerations* gens() { return _gens; }

  // Returns JNI_OK on success
  virtual jint initialize();

  void post_initialize();
  void update_counters();

  // The alignment used for the various generations.
  size_t young_gen_alignment() const { return _young_gen_alignment; }
  size_t old_gen_alignment()   const { return _old_gen_alignment; }

  // The alignment used for eden and survivors within the young gen
  // and for the boundary between young gen and old gen.
  size_t intra_heap_alignment() { return GenCollectorPolicy::intra_heap_alignment(); }

  size_t capacity() const;
  size_t used() const;

  // Return "true" if all generations have reached the
  // maximal committed limit that they can reach, without a garbage
  // collection.
  virtual bool is_maximal_no_gc() const;

  // Return true if the reference points to an object that
  // can be moved in a partial collection.  For currently implemented
  // generational collectors that means during a collection of
  // the young gen.
  virtual bool is_scavengable(const void* addr);

  // Does this heap support heap inspection? (+PrintClassHistogram)
  bool supports_heap_inspection() const { return true; }

  size_t max_capacity() const;

  // Whether p is in the allocated part of the heap
  bool is_in(const void* p) const;

  bool is_in_reserved(const void* p) const;

#ifdef ASSERT
  virtual bool is_in_partial_collection(const void *p);
#endif

  bool is_in_young(oop p);  // reserved part
  bool is_in_old(oop p);    // reserved part

  // Memory allocation.  "gc_overhead_limit_was_exceeded" will
  // be set to true if the adaptive size policy determines that
  // an excessive amount of time is being spent doing collections
  // and caused a NULL to be returned.  If a NULL is not returned,
  // "gc_overhead_limit_was_exceeded" has an undefined meaning.
  HeapWord* mem_allocate(size_t size, bool* gc_overhead_limit_was_exceeded);
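
  // A minimal caller-side sketch (illustrative only; the real call sites live
  // in collectedHeap.inline.hpp):
  //
  //   bool gc_overhead_limit_was_exceeded = false;
  //   HeapWord* obj = heap->mem_allocate(size, &gc_overhead_limit_was_exceeded);
  //   if (obj == NULL && gc_overhead_limit_was_exceeded) {
  //     // Too much time is being spent in collections; the caller throws
  //     // OutOfMemoryError("GC overhead limit exceeded") rather than
  //     // OutOfMemoryError("Java heap space").
  //   }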

  // Allocation attempt(s) during a safepoint.  It should never be called
  // to allocate a new TLAB as this allocation might be satisfied out
  // of the old generation.
  HeapWord* failed_mem_allocate(size_t size);

  // Support for System.gc()
  void collect(GCCause::Cause cause);

  // This should also be called by the VM thread at a safepoint (e.g., from a
  // VM operation).  It collects the young generation only, unless the
  // scavenge fails; it will then attempt a full gc.
  inline void invoke_scavenge();

  // Perform a full collection of the entire heap.  If clear_all_soft_refs is
  // true, it will compact everything and clear all soft references.
  virtual void do_full_collection(bool clear_all_soft_refs);

  bool supports_inline_contig_alloc() const { return !UseNUMA; }

  HeapWord** top_addr() const { return !UseNUMA ? young_gen()->top_addr() : (HeapWord**)-1; }
  HeapWord** end_addr() const { return !UseNUMA ? young_gen()->end_addr() : (HeapWord**)-1; }

  void ensure_parsability(bool retire_tlabs);
  void accumulate_statistics_all_tlabs();
  void resize_all_tlabs();

  size_t unsafe_max_alloc();

  bool supports_tlab_allocation() const { return true; }

  size_t tlab_capacity(Thread* thr) const;
  size_t unsafe_max_tlab_alloc(Thread* thr) const;

  // Can a compiler initialize a new object without store barriers?
  // This permission only extends from the creation of a new object
  // via a TLAB up to the first subsequent safepoint.
  virtual bool can_elide_tlab_store_barriers() const {
    return true;
  }

  virtual bool card_mark_must_follow_store() const {
    return false;
  }

  // Return true if we don't need a store barrier for
  // initializing stores to an object at this address.
  virtual bool can_elide_initializing_store_barrier(oop new_obj);

  void oop_iterate(ExtendedOopClosure* cl);
  void object_iterate(ObjectClosure* cl);
  void safe_object_iterate(ObjectClosure* cl) { object_iterate(cl); }

  HeapWord* block_start(const void* addr) const;
  size_t block_size(const HeapWord* addr) const;
  bool block_is_obj(const HeapWord* addr) const;

  jlong millis_since_last_gc();

  void prepare_for_verify();
  PSHeapSummary create_ps_heap_summary();
  virtual void print_on(outputStream* st) const;
  virtual void print_on_error(outputStream* st) const;
  virtual void print_gc_threads_on(outputStream* st) const;
  virtual void gc_threads_do(ThreadClosure* tc) const;
  virtual void print_tracing_info() const;

  void verify(bool silent, VerifyOption option /* ignored */);

  void print_heap_change(size_t prev_used);

  // Resize the young generation.  The reserved space for the
  // generation may be expanded in preparation for the resize.
  void resize_young_gen(size_t eden_size, size_t survivor_size);

  // Resize the old generation.  The reserved space for the
  // generation may be expanded in preparation for the resize.
  void resize_old_gen(size_t desired_free_space);

  // Save the tops of the spaces in all generations
  void record_gen_tops_before_GC() PRODUCT_RETURN;

  // Mangle the unused parts of all spaces in the heap
  void gen_mangle_unused_area() PRODUCT_RETURN;

  // Call these in sequential code around the processing of strong roots.
  class ParStrongRootsScope : public MarkingCodeBlobClosure::MarkScope {
   public:
    ParStrongRootsScope();
    ~ParStrongRootsScope();
  };
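
  // A minimal usage sketch (illustrative only, not copied from an actual call
  // site): the scope is an RAII guard constructed around root processing.
  //
  //   {
  //     ParallelScavengeHeap::ParStrongRootsScope psrs;
  //     // ... process strong roots ...
  //   } // destructor runs once root processing is complete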
};

inline size_t ParallelScavengeHeap::set_alignment(size_t& var, size_t val)
{
  assert(is_power_of_2((intptr_t)val), "must be a power of 2");
  var = round_to(val, intra_heap_alignment());
  return var;
}

#endif // SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PARALLELSCAVENGEHEAP_HPP