/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PARALLELSCAVENGEHEAP_HPP
#define SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PARALLELSCAVENGEHEAP_HPP

#include "gc_implementation/parallelScavenge/objectStartArray.hpp"
#include "gc_implementation/parallelScavenge/psGCAdaptivePolicyCounters.hpp"
#include "gc_implementation/parallelScavenge/psOldGen.hpp"
#include "gc_implementation/parallelScavenge/psYoungGen.hpp"
#include "gc_implementation/shared/gcPolicyCounters.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "utilities/ostream.hpp"

class AdjoiningGenerations;
class GCTaskManager;
class PSAdaptiveSizePolicy;
class GenerationSizer;
class CollectorPolicy;

class ParallelScavengeHeap : public CollectedHeap {
  friend class VMStructs;
 private:
  static PSYoungGen* _young_gen;
  static PSOldGen*   _old_gen;

  // Sizing policy for entire heap
  static PSAdaptiveSizePolicy*       _size_policy;
  static PSGCAdaptivePolicyCounters* _gc_policy_counters;

  static ParallelScavengeHeap* _psh;

  size_t _young_gen_alignment;
  size_t _old_gen_alignment;

  GenerationSizer* _collector_policy;

  inline size_t set_alignment(size_t& var, size_t val);

  // Collection of generations that are adjacent in the
  // space reserved for the heap.
  AdjoiningGenerations* _gens;
  unsigned int _death_march_count;

  static GCTaskManager* _gc_task_manager;      // The task manager.

 protected:
  static inline size_t total_invocations();
  HeapWord* allocate_new_tlab(size_t size);

  inline bool should_alloc_in_eden(size_t size) const;
  inline void death_march_check(HeapWord* const result, size_t size);
  HeapWord* mem_allocate_old_gen(size_t size);

 public:
  ParallelScavengeHeap() : CollectedHeap() {
    _death_march_count = 0;
    set_alignment(_young_gen_alignment, intra_heap_alignment());
    set_alignment(_old_gen_alignment, intra_heap_alignment());
  }

  // For use by VM operations
  enum CollectionType {
    Scavenge,
    MarkSweep
  };

  ParallelScavengeHeap::Name kind() const {
    return CollectedHeap::ParallelScavengeHeap;
  }

  virtual CollectorPolicy* collector_policy() const { return (CollectorPolicy*) _collector_policy; }

  static PSYoungGen* young_gen() { return _young_gen; }
  static PSOldGen*   old_gen()   { return _old_gen; }

  virtual PSAdaptiveSizePolicy* size_policy() { return _size_policy; }

  static PSGCAdaptivePolicyCounters* gc_policy_counters() { return _gc_policy_counters; }

  static ParallelScavengeHeap* heap();

  static GCTaskManager* const gc_task_manager() { return _gc_task_manager; }

  AdjoiningGenerations* gens() { return _gens; }

  // Returns JNI_OK on success
  virtual jint initialize();

  void post_initialize();
  void update_counters();

  // The alignment used for the various generations.
  size_t young_gen_alignment() const { return _young_gen_alignment; }
  size_t old_gen_alignment()   const { return _old_gen_alignment; }

  // The alignment used for eden and survivors within the young gen
  // and for the boundary between the young gen and the old gen.
  size_t intra_heap_alignment() const { return 64 * K * HeapWordSize; }

  size_t capacity() const;
  size_t used() const;

  // Return "true" if all generations have reached the
  // maximal committed limit that they can reach, without a garbage
  // collection.
  virtual bool is_maximal_no_gc() const;

  // Return true if the reference points to an object that
  // can be moved in a partial collection.  For currently implemented
  // generational collectors that means during a collection of
  // the young gen.
  virtual bool is_scavengable(const void* addr);

  // Does this heap support heap inspection? (+PrintClassHistogram)
  bool supports_heap_inspection() const { return true; }

  size_t max_capacity() const;

  // Whether p is in the allocated part of the heap
  bool is_in(const void* p) const;

  bool is_in_reserved(const void* p) const;

#ifdef ASSERT
  virtual bool is_in_partial_collection(const void *p);
#endif

  bool is_in_young(oop p);  // reserved part
  bool is_in_old(oop p);    // reserved part

  // Memory allocation.  "gc_overhead_limit_was_exceeded" will
  // be set to true if the adaptive size policy determines that
  // an excessive amount of time is being spent doing collections
  // and has caused a NULL to be returned.  If a NULL is not returned,
  // "gc_overhead_limit_was_exceeded" has an undefined meaning.
  HeapWord* mem_allocate(size_t size,
                         bool* gc_overhead_limit_was_exceeded);

  // Allocation attempt(s) during a safepoint.  It should never be called
  // to allocate a new TLAB as this allocation might be satisfied out
  // of the old generation.
  HeapWord* failed_mem_allocate(size_t size);

  // Support for System.gc()
  void collect(GCCause::Cause cause);

  // These also should be called by the VM thread at a safepoint (e.g., from a
  // VM operation).
  //
  // invoke_scavenge() collects the young generation only, unless the scavenge
  // fails; it will then attempt a full gc.  do_full_collection() collects and
  // compacts the entire heap; if clear_all_soft_refs is true, it also clears
  // all soft references.
  inline void invoke_scavenge();

  // Perform a full collection
  virtual void do_full_collection(bool clear_all_soft_refs);

  bool supports_inline_contig_alloc() const { return !UseNUMA; }

  HeapWord** top_addr() const { return !UseNUMA ? young_gen()->top_addr() : (HeapWord**)-1; }
  HeapWord** end_addr() const { return !UseNUMA ? young_gen()->end_addr() : (HeapWord**)-1; }

  void ensure_parsability(bool retire_tlabs);
  void accumulate_statistics_all_tlabs();
  void resize_all_tlabs();

  size_t unsafe_max_alloc();

  bool supports_tlab_allocation() const { return true; }

  size_t tlab_capacity(Thread* thr) const;
  size_t unsafe_max_tlab_alloc(Thread* thr) const;

  // Can a compiler initialize a new object without store barriers?
  // This permission only extends from the creation of a new object
  // via a TLAB up to the first subsequent safepoint.
  virtual bool can_elide_tlab_store_barriers() const {
    return true;
  }

  virtual bool card_mark_must_follow_store() const {
    return false;
  }

  // Return true if we don't need a store barrier for
  // initializing stores to an object at this address.
  virtual bool can_elide_initializing_store_barrier(oop new_obj);

  void oop_iterate(ExtendedOopClosure* cl);
  void object_iterate(ObjectClosure* cl);
  void safe_object_iterate(ObjectClosure* cl) { object_iterate(cl); }

  HeapWord* block_start(const void* addr) const;
  size_t block_size(const HeapWord* addr) const;
  bool block_is_obj(const HeapWord* addr) const;

  jlong millis_since_last_gc();

  void prepare_for_verify();
  virtual void print_on(outputStream* st) const;
  virtual void print_on_error(outputStream* st) const;
  virtual void print_gc_threads_on(outputStream* st) const;
  virtual void gc_threads_do(ThreadClosure* tc) const;
  virtual void print_tracing_info() const;

  void verify(bool silent, VerifyOption option /* ignored */);

  void print_heap_change(size_t prev_used);

  // Resize the young generation.  The reserved space for the
  // generation may be expanded in preparation for the resize.
  void resize_young_gen(size_t eden_size, size_t survivor_size);

  // Resize the old generation.  The reserved space for the
  // generation may be expanded in preparation for the resize.
  void resize_old_gen(size_t desired_free_space);

  // Save the tops of the spaces in all generations
  void record_gen_tops_before_GC() PRODUCT_RETURN;

  // Mangle the unused parts of all spaces in the heap
  void gen_mangle_unused_area() PRODUCT_RETURN;

  // Call these in sequential code around the processing of strong roots.
  class ParStrongRootsScope : public MarkingCodeBlobClosure::MarkScope {
   public:
    ParStrongRootsScope();
    ~ParStrongRootsScope();
  };
};

inline size_t ParallelScavengeHeap::set_alignment(size_t& var, size_t val)
{
  assert(is_power_of_2((intptr_t)val), "must be a power of 2");
  var = round_to(val, intra_heap_alignment());
  return var;
}

#endif // SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PARALLELSCAVENGEHEAP_HPP
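
// A minimal usage sketch, kept entirely in comment form so the header is
// unchanged for the compiler: VM-internal code that needs the Parallel
// Scavenge-specific heap typically goes through the static accessors
// declared above (heap(), young_gen(), old_gen()) rather than holding a
// generic CollectedHeap*.  The snippet below is illustrative only and
// assumes it runs in a context where this collector is the active heap.
//
//   ParallelScavengeHeap* ps_heap = ParallelScavengeHeap::heap();
//   size_t committed = ps_heap->capacity();   // capacity across both generations
//   size_t in_use    = ps_heap->used();       // bytes currently allocated
//   PSYoungGen* young = ParallelScavengeHeap::young_gen();
//   PSOldGen*   old   = ParallelScavengeHeap::old_gen();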