/*
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_MEMORY_DEFNEWGENERATION_HPP
#define SHARE_VM_MEMORY_DEFNEWGENERATION_HPP

#include "gc_implementation/shared/ageTable.hpp"
#include "gc_implementation/shared/cSpaceCounters.hpp"
#include "gc_implementation/shared/generationCounters.hpp"
#include "gc_implementation/shared/copyFailedInfo.hpp"
#include "utilities/stack.hpp"

class ContiguousSpace;
class ScanClosure;
class STWGCTimer;

// DefNewGeneration is a young generation containing eden, from- and
// to-space.

class DefNewGeneration: public Generation {
  friend class VMStructs;

protected:
  Generation* _next_gen;
  uint        _tenuring_threshold;   // Tenuring threshold for next collection.
  ageTable    _age_table;
  // Size of object to pretenure in words; command line provides bytes
  size_t      _pretenure_size_threshold_words;

  ageTable*   age_table() { return &_age_table; }

  // Initialize state to optimistically assume no promotion failure will
  // happen.
  void   init_assuming_no_promotion_failure();
  // True iff a promotion has failed in the current collection.
  bool   _promotion_failed;
  bool   promotion_failed() { return _promotion_failed; }
  PromotionFailedInfo _promotion_failed_info;

  // Handling promotion failure.  A young generation collection
  // can fail if a live object cannot be copied out of its
  // location in eden or from-space during the collection.  If
  // a collection fails, the young generation is left in a
  // consistent state such that it can be collected by a
  // full collection.
  //   Before the collection
  //     Objects are in eden or from-space
  //     All roots into the young generation point into eden or from-space.
  //
  //   After a failed collection
  //     Objects may be in eden, from-space, or to-space
  //     An object A in eden or from-space may have a copy B
  //       in to-space.  If B exists, all roots that once pointed
  //       to A must now point to B.
  //     All objects in the young generation are unmarked.
  //     Eden, from-space, and to-space will all be collected by
  //       the full collection.
  void handle_promotion_failure(oop);

  // In the absence of promotion failure, we wouldn't look at "from-space"
  // objects after a young-gen collection.  When promotion fails, however,
  // the subsequent full collection will look at from-space objects:
  // therefore we must remove their forwarding pointers.
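  //
  // Illustrative sketch only (a hedged outline of the idea, not the VM's
  // actual code): when a copy fails, handle_promotion_failure() leaves the
  // object forwarded to itself so later scans can recognize it, roughly:
  //
  //   preserve_mark_if_necessary(obj, obj->mark());  // save non-trivial mark
  //   obj->forward_to(obj);                          // self-forwarding pointer
  //   _promo_failure_scan_stack.push(obj);           // rescan its fields later
  //
  // remove_forwarding_pointers() then walks eden and from-space restoring
  // the preserved marks, so the follow-up full collection sees ordinary,
  // unmarked objects.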
  void   remove_forwarding_pointers();

  // Preserve the mark of "obj", if necessary, in preparation for its mark
  // word being overwritten with a self-forwarding-pointer.
  void   preserve_mark_if_necessary(oop obj, markOop m);
  void   preserve_mark(oop obj, markOop m);    // work routine used by the above

  // Together, these keep <object with a preserved mark, mark value> pairs.
  // They should always contain the same number of elements.
  Stack<oop, mtGC>     _objs_with_preserved_marks;
  Stack<markOop, mtGC> _preserved_marks_of_objs;

  // Promotion failure handling
  ExtendedOopClosure *_promo_failure_scan_stack_closure;
  void set_promo_failure_scan_stack_closure(ExtendedOopClosure *scan_stack_closure) {
    _promo_failure_scan_stack_closure = scan_stack_closure;
  }

  Stack<oop, mtGC> _promo_failure_scan_stack;
  void drain_promo_failure_scan_stack(void);
  bool _promo_failure_drain_in_progress;

  // Performance Counters
  GenerationCounters*  _gen_counters;
  CSpaceCounters*      _eden_counters;
  CSpaceCounters*      _from_counters;
  CSpaceCounters*      _to_counters;

  // sizing information
  size_t               _max_eden_size;
  size_t               _max_survivor_size;

  // Allocation support
  bool _should_allocate_from_space;
  bool should_allocate_from_space() const {
    return _should_allocate_from_space;
  }
  void clear_should_allocate_from_space() {
    _should_allocate_from_space = false;
  }
  void set_should_allocate_from_space() {
    _should_allocate_from_space = true;
  }

  // Tenuring
  void adjust_desired_tenuring_threshold();

  // Spaces
  ContiguousSpace* _eden_space;
  ContiguousSpace* _from_space;
  ContiguousSpace* _to_space;

  STWGCTimer* _gc_timer;

  enum SomeProtectedConstants {
    // Generations are GenGrain-aligned and have sizes that are multiples of
    // GenGrain.
    MinFreeScratchWords = 100
  };

  // Return the size of a survivor space if this generation were of size
  // gen_size.
  size_t compute_survivor_size(size_t gen_size, size_t alignment) const {
    size_t n = gen_size / (SurvivorRatio + 2);
    return n > alignment ? align_size_down(n, alignment) : alignment;
  }
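  // Worked example for compute_survivor_size() above (illustrative, assuming
  // the HotSpot default SurvivorRatio of 8): for gen_size = 10M words and
  // alignment = 64K words,
  //   n = 10M / (8 + 2) = 1M,
  // which is already a multiple of 64K, so each survivor space gets 1M words
  // and eden receives the remaining 8M, giving the 8:1 eden-to-survivor
  // ratio that SurvivorRatio requests.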
public:  // was "protected" but caused compile error on win32
  class IsAliveClosure: public BoolObjectClosure {
    Generation* _g;
  public:
    IsAliveClosure() {}
    IsAliveClosure(Generation* g);
    bool do_object_b(oop p);
  };

  class KeepAliveClosure: public OopClosure {
  protected:
    ScanWeakRefClosure* _cl;
    CardTableRS* _rs;
    template <class T> void do_oop_work(T* p);
  public:
    KeepAliveClosure(ScanWeakRefClosure* cl);
    virtual void do_oop(oop* p);
    virtual void do_oop(narrowOop* p);
  };

  class FastKeepAliveClosure: public KeepAliveClosure {
  protected:
    HeapWord* _boundary;
    template <class T> void do_oop_work(T* p);
  public:
    FastKeepAliveClosure(DefNewGeneration* g, ScanWeakRefClosure* cl);
    virtual void do_oop(oop* p);
    virtual void do_oop(narrowOop* p);
  };

  class EvacuateFollowersClosure: public VoidClosure {
    GenCollectedHeap* _gch;
    int _level;
    ScanClosure* _scan_cur_or_nonheap;
    ScanClosure* _scan_older;
  public:
    EvacuateFollowersClosure(GenCollectedHeap* gch, int level,
                             ScanClosure* cur, ScanClosure* older);
    void do_void();
  };

  class FastEvacuateFollowersClosure: public VoidClosure {
    GenCollectedHeap* _gch;
    int _level;
    DefNewGeneration* _gen;
    FastScanClosure* _scan_cur_or_nonheap;
    FastScanClosure* _scan_older;
  public:
    FastEvacuateFollowersClosure(GenCollectedHeap* gch, int level,
                                 DefNewGeneration* gen,
                                 FastScanClosure* cur,
                                 FastScanClosure* older);
    void do_void();
  };

public:
  DefNewGeneration(ReservedSpace rs, size_t initial_byte_size, int level,
                   const char* policy="Copy");

  virtual void ref_processor_init();

  virtual Generation::Name kind() { return Generation::DefNew; }

  // Accessing spaces
  ContiguousSpace* eden() const { return _eden_space; }
  ContiguousSpace* from() const { return _from_space; }
  ContiguousSpace* to()   const { return _to_space;   }

  virtual CompactibleSpace* first_compaction_space() const;

  // Space enquiries
  size_t capacity() const;
  size_t used() const;
  size_t free() const;
  size_t max_capacity() const;
  size_t capacity_before_gc() const;
  size_t unsafe_max_alloc_nogc() const;
  size_t contiguous_available() const;

  size_t max_eden_size() const     { return _max_eden_size; }
  size_t max_survivor_size() const { return _max_survivor_size; }

  bool supports_inline_contig_alloc() const { return true; }
  HeapWord** top_addr() const;
  HeapWord** end_addr() const;
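  // A conceptual sketch of how these exported addresses are used (the real
  // fast path lives in JIT-compiled code and uses an atomic compare-and-swap;
  // this is an illustration, not the VM's actual allocator):
  //
  //   HeapWord* top = *top_addr();
  //   if (top + word_size <= *end_addr()) {  // room left in eden?
  //     *top_addr() = top + word_size;       // bump the pointer
  //     return top;                          // newly allocated block
  //   }
  //   // otherwise fall back to a slow path that may trigger a GC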
  // Thread-local allocation buffers
  bool supports_tlab_allocation() const { return true; }
  size_t tlab_capacity() const;
  size_t tlab_used() const;
  size_t unsafe_max_tlab_alloc() const;

  // Grow the generation by the specified number of bytes.
  // The size in bytes is assumed to be properly aligned.
  // Return true if the expansion was successful.
  bool expand(size_t bytes);

  // DefNewGeneration cannot currently expand except at
  // a GC.
  virtual bool is_maximal_no_gc() const { return true; }

  // Iteration
  void object_iterate(ObjectClosure* blk);

  void younger_refs_iterate(OopsInGenClosure* cl);

  void space_iterate(SpaceClosure* blk, bool usedOnly = false);

  // Allocation support
  virtual bool should_allocate(size_t word_size, bool is_tlab) {
    assert(UseTLAB || !is_tlab, "Should not allocate tlab");

    size_t overflow_limit = (size_t)1 << (BitsPerSize_t - LogHeapWordSize);

    const bool non_zero      = word_size > 0;
    const bool overflows     = word_size >= overflow_limit;
    const bool check_too_big = _pretenure_size_threshold_words > 0;
    const bool not_too_big   = word_size < _pretenure_size_threshold_words;
    const bool size_ok       = is_tlab || !check_too_big || not_too_big;

    bool result = !overflows &&
                  non_zero   &&
                  size_ok;

    return result;
  }
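  // Illustrative arithmetic for overflow_limit above (assuming a 64-bit
  // build, where BitsPerSize_t = 64 and LogHeapWordSize = 3):
  //   overflow_limit = (size_t)1 << (64 - 3) = 2^61 words.
  // A request of 2^61 words would be 2^64 bytes, which no longer fits in a
  // size_t, so any word_size at or above this limit is rejected as an
  // overflow before the byte conversion can wrap around.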
  HeapWord* allocate(size_t word_size, bool is_tlab);
  HeapWord* allocate_from_space(size_t word_size);

  HeapWord* par_allocate(size_t word_size, bool is_tlab);

  virtual void gc_epilogue(bool full);

  // Save the tops for eden, from, and to
  virtual void record_spaces_top();

  // Doesn't require additional work during GC prologue and epilogue
  virtual bool performs_in_place_marking() const { return false; }

  // Accessing marks
  void save_marks();
  void reset_saved_marks();
  bool no_allocs_since_save_marks();

  // Need to declare the full complement of closures, whether we'll
  // override them or not, or get a message from the compiler:
  //   oop_since_save_marks_iterate_nv hides virtual function...
#define DefNew_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix) \
  void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl);

  ALL_SINCE_SAVE_MARKS_CLOSURES(DefNew_SINCE_SAVE_MARKS_DECL)

#undef DefNew_SINCE_SAVE_MARKS_DECL

  // For non-youngest collection, the DefNewGeneration can contribute
  // "to-space".
  virtual void contribute_scratch(ScratchBlock*& list, Generation* requestor,
                                  size_t max_alloc_words);

  // Reset for contribution of "to-space".
  virtual void reset_scratch();

  // GC support
  virtual void compute_new_size();

  // Returns true if the collection is likely to be safely
  // completed.  Even if this method returns true, a collection
  // may not be guaranteed to succeed, and the system should be
  // able to safely unwind and recover from that failure, albeit
  // at some additional cost.  Overrides the superclass's implementation.
  virtual bool collection_attempt_is_safe();

  virtual void collect(bool   full,
                       bool   clear_all_soft_refs,
                       size_t size,
                       bool   is_tlab);
  HeapWord* expand_and_allocate(size_t size,
                                bool is_tlab,
                                bool parallel = false);

  oop copy_to_survivor_space(oop old);
  uint tenuring_threshold() { return _tenuring_threshold; }

  // Performance Counter support
  void update_counters();

  // Printing
  virtual const char* name() const;
  virtual const char* short_name() const { return "DefNew"; }

  // PrintHeapAtGC support.
  void print_on(outputStream* st) const;

  void verify();

  bool promo_failure_scan_is_complete() const {
    return _promo_failure_scan_stack.is_empty();
  }

protected:
  // If clear_space is true, clear the survivor spaces.  Eden is
  // cleared if the minimum size of eden is 0.  If mangle_space
  // is true, also mangle the space in debug mode.
  void compute_space_boundaries(uintx minimum_eden_size,
                                bool clear_space,
                                bool mangle_space);
  // Scavenge support
  void swap_spaces();
};

#endif // SHARE_VM_MEMORY_DEFNEWGENERATION_HPP