1 /* 2 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #ifndef SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PARALLELSCAVENGEHEAP_HPP 26 #define SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PARALLELSCAVENGEHEAP_HPP 27 28 #include "gc_implementation/parallelScavenge/objectStartArray.hpp" 29 #include "gc_implementation/parallelScavenge/psGCAdaptivePolicyCounters.hpp" 30 #include "gc_implementation/parallelScavenge/psOldGen.hpp" 31 #include "gc_implementation/parallelScavenge/psPermGen.hpp" 32 #include "gc_implementation/parallelScavenge/psYoungGen.hpp" 33 #include "gc_implementation/shared/gcPolicyCounters.hpp" 34 #include "gc_interface/collectedHeap.inline.hpp" 35 #include "utilities/ostream.hpp" 36 37 class AdjoiningGenerations; 38 class CollectorPolicy; 39 class GCHeapSummary; 40 class GCTaskManager; 41 class GenerationSizer; 42 class CollectorPolicy; 43 class PSAdaptiveSizePolicy; 44 class PSHeapSummary; 45 class VirtualSpaceSummary; 46 47 class ParallelScavengeHeap : public CollectedHeap { 48 friend class VMStructs; 49 private: 50 static PSYoungGen* _young_gen; 51 static PSOldGen* _old_gen; 52 static PSPermGen* _perm_gen; 53 54 // Sizing policy for entire heap 55 static PSAdaptiveSizePolicy* _size_policy; 56 static PSGCAdaptivePolicyCounters* _gc_policy_counters; 57 58 static ParallelScavengeHeap* _psh; 59 60 size_t _perm_gen_alignment; 61 size_t _young_gen_alignment; 62 size_t _old_gen_alignment; 63 64 GenerationSizer* _collector_policy; 65 66 inline size_t set_alignment(size_t& var, size_t val); 67 68 // Collection of generations that are adjacent in the 69 // space reserved for the heap. 70 AdjoiningGenerations* _gens; 71 unsigned int _death_march_count; 72 73 static GCTaskManager* _gc_task_manager; // The task manager. 74 75 void trace_heap(GCWhen::Type when, GCTracer* tracer); 76 77 protected: 78 static inline size_t total_invocations(); 79 HeapWord* allocate_new_tlab(size_t size); 80 81 inline bool should_alloc_in_eden(size_t size) const; 82 inline void death_march_check(HeapWord* const result, size_t size); 83 HeapWord* mem_allocate_old_gen(size_t size); 84 85 public: 86 ParallelScavengeHeap() : CollectedHeap() { 87 _death_march_count = 0; 88 set_alignment(_perm_gen_alignment, intra_heap_alignment()); 89 set_alignment(_young_gen_alignment, intra_heap_alignment()); 90 set_alignment(_old_gen_alignment, intra_heap_alignment()); 91 } 92 93 // For use by VM operations 94 enum CollectionType { 95 Scavenge, 96 MarkSweep 97 }; 98 99 ParallelScavengeHeap::Name kind() const { 100 return CollectedHeap::ParallelScavengeHeap; 101 } 102 103 CollectorPolicy* collector_policy() const { return (CollectorPolicy*) _collector_policy; } 104 // GenerationSizer* collector_policy() const { return _collector_policy; } 105 106 static PSYoungGen* young_gen() { return _young_gen; } 107 static PSOldGen* old_gen() { return _old_gen; } 108 static PSPermGen* perm_gen() { return _perm_gen; } 109 110 virtual PSAdaptiveSizePolicy* size_policy() { return _size_policy; } 111 112 static PSGCAdaptivePolicyCounters* gc_policy_counters() { return _gc_policy_counters; } 113 114 static ParallelScavengeHeap* heap(); 115 116 static GCTaskManager* const gc_task_manager() { return _gc_task_manager; } 117 118 AdjoiningGenerations* gens() { return _gens; } 119 120 // Returns JNI_OK on success 121 virtual jint initialize(); 122 123 void post_initialize(); 124 void update_counters(); 125 // The alignment used for the various generations. 126 size_t perm_gen_alignment() const { return _perm_gen_alignment; } 127 size_t young_gen_alignment() const { return _young_gen_alignment; } 128 size_t old_gen_alignment() const { return _old_gen_alignment; } 129 130 // The alignment used for eden and survivors within the young gen 131 // and for boundary between young gen and old gen. 132 size_t intra_heap_alignment() const { return 64 * K; } 133 134 size_t capacity() const; 135 size_t used() const; 136 137 // Return "true" if all generations (but perm) have reached the 138 // maximal committed limit that they can reach, without a garbage 139 // collection. 140 virtual bool is_maximal_no_gc() const; 141 142 // Return true if the reference points to an object that 143 // can be moved in a partial collection. For currently implemented 144 // generational collectors that means during a collection of 145 // the young gen. 146 virtual bool is_scavengable(const void* addr); 147 148 // Does this heap support heap inspection? (+PrintClassHistogram) 149 bool supports_heap_inspection() const { return true; } 150 151 size_t permanent_capacity() const; 152 size_t permanent_used() const; 153 154 size_t max_capacity() const; 155 156 // Whether p is in the allocated part of the heap 157 bool is_in(const void* p) const; 158 159 bool is_in_reserved(const void* p) const; 160 bool is_in_permanent(const void *p) const { // reserved part 161 return perm_gen()->reserved().contains(p); 162 } 163 164 #ifdef ASSERT 165 virtual bool is_in_partial_collection(const void *p); 166 #endif 167 168 bool is_permanent(const void *p) const { // committed part 169 return perm_gen()->is_in(p); 170 } 171 172 inline bool is_in_young(oop p); // reserved part 173 inline bool is_in_old_or_perm(oop p); // reserved part 174 175 // Memory allocation. "gc_time_limit_was_exceeded" will 176 // be set to true if the adaptive size policy determine that 177 // an excessive amount of time is being spent doing collections 178 // and caused a NULL to be returned. If a NULL is not returned, 179 // "gc_time_limit_was_exceeded" has an undefined meaning. 180 HeapWord* mem_allocate(size_t size, 181 bool* gc_overhead_limit_was_exceeded); 182 183 // Allocation attempt(s) during a safepoint. It should never be called 184 // to allocate a new TLAB as this allocation might be satisfied out 185 // of the old generation. 186 HeapWord* failed_mem_allocate(size_t size); 187 188 HeapWord* permanent_mem_allocate(size_t size); 189 HeapWord* failed_permanent_mem_allocate(size_t size); 190 191 // Support for System.gc() 192 void collect(GCCause::Cause cause); 193 194 // This interface assumes that it's being called by the 195 // vm thread. It collects the heap assuming that the 196 // heap lock is already held and that we are executing in 197 // the context of the vm thread. 198 void collect_as_vm_thread(GCCause::Cause cause); 199 200 // These also should be called by the vm thread at a safepoint (e.g., from a 201 // VM operation). 202 // 203 // The first collects the young generation only, unless the scavenge fails; it 204 // will then attempt a full gc. The second collects the entire heap; if 205 // maximum_compaction is true, it will compact everything and clear all soft 206 // references. 207 inline void invoke_scavenge(); 208 inline void invoke_full_gc(bool maximum_compaction); 209 210 bool supports_inline_contig_alloc() const { return !UseNUMA; } 211 212 HeapWord** top_addr() const { return !UseNUMA ? young_gen()->top_addr() : (HeapWord**)-1; } 213 HeapWord** end_addr() const { return !UseNUMA ? young_gen()->end_addr() : (HeapWord**)-1; } 214 215 void ensure_parsability(bool retire_tlabs); 216 void accumulate_statistics_all_tlabs(); 217 void resize_all_tlabs(); 218 219 size_t unsafe_max_alloc(); 220 221 bool supports_tlab_allocation() const { return true; } 222 223 size_t tlab_capacity(Thread* thr) const; 224 size_t unsafe_max_tlab_alloc(Thread* thr) const; 225 226 // Can a compiler initialize a new object without store barriers? 227 // This permission only extends from the creation of a new object 228 // via a TLAB up to the first subsequent safepoint. 229 virtual bool can_elide_tlab_store_barriers() const { 230 return true; 231 } 232 233 virtual bool card_mark_must_follow_store() const { 234 return false; 235 } 236 237 // Return true if we don't we need a store barrier for 238 // initializing stores to an object at this address. 239 virtual bool can_elide_initializing_store_barrier(oop new_obj); 240 241 // Can a compiler elide a store barrier when it writes 242 // a permanent oop into the heap? Applies when the compiler 243 // is storing x to the heap, where x->is_perm() is true. 244 virtual bool can_elide_permanent_oop_store_barriers() const { 245 return true; 246 } 247 248 void oop_iterate(OopClosure* cl); 249 void object_iterate(ObjectClosure* cl); 250 void safe_object_iterate(ObjectClosure* cl) { object_iterate(cl); } 251 void permanent_oop_iterate(OopClosure* cl); 252 void permanent_object_iterate(ObjectClosure* cl); 253 254 HeapWord* block_start(const void* addr) const; 255 size_t block_size(const HeapWord* addr) const; 256 bool block_is_obj(const HeapWord* addr) const; 257 258 jlong millis_since_last_gc(); 259 260 void prepare_for_verify(); 261 PSHeapSummary create_ps_heap_summary(); 262 VirtualSpaceSummary create_perm_gen_space_summary(); 263 virtual void print_on(outputStream* st) const; 264 virtual void print_gc_threads_on(outputStream* st) const; 265 virtual void gc_threads_do(ThreadClosure* tc) const; 266 virtual void print_tracing_info() const; 267 268 void verify(bool silent, VerifyOption option /* ignored */); 269 270 void print_heap_change(size_t prev_used); 271 272 // Resize the young generation. The reserved space for the 273 // generation may be expanded in preparation for the resize. 274 void resize_young_gen(size_t eden_size, size_t survivor_size); 275 276 // Resize the old generation. The reserved space for the 277 // generation may be expanded in preparation for the resize. 278 void resize_old_gen(size_t desired_free_space); 279 280 // Save the tops of the spaces in all generations 281 void record_gen_tops_before_GC() PRODUCT_RETURN; 282 283 // Mangle the unused parts of all spaces in the heap 284 void gen_mangle_unused_area() PRODUCT_RETURN; 285 286 // Call these in sequential code around the processing of strong roots. 287 class ParStrongRootsScope : public MarkingCodeBlobClosure::MarkScope { 288 public: 289 ParStrongRootsScope(); 290 ~ParStrongRootsScope(); 291 }; 292 }; 293 294 inline size_t ParallelScavengeHeap::set_alignment(size_t& var, size_t val) 295 { 296 assert(is_power_of_2((intptr_t)val), "must be a power of 2"); 297 var = round_to(val, intra_heap_alignment()); 298 return var; 299 } 300 301 #endif // SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PARALLELSCAVENGEHEAP_HPP