/*
 * Copyright (c) 2001, 2008, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

class EdenSpace;
class ContiguousSpace;
class ScanClosure;

// DefNewGeneration is a young generation containing eden, from- and
// to-space.

class DefNewGeneration: public Generation {
  friend class VMStructs;

protected:
  Generation* _next_gen;
  int         _tenuring_threshold;   // Tenuring threshold for next collection.
  ageTable    _age_table;
  // Size (in words) of objects to pretenure; the command line provides the value in bytes.
  size_t        _pretenure_size_threshold_words;

  ageTable*   age_table() { return &_age_table; }
  // Initialize state to optimistically assume no promotion failure will
  // happen.
  void   init_assuming_no_promotion_failure();
  // True iff a promotion has failed in the current collection.
  bool   _promotion_failed;
  bool   promotion_failed() { return _promotion_failed; }

  // Handling promotion failure.  A young generation collection
  // can fail if a live object cannot be copied out of its
  // location in eden or from-space during the collection.  If
  // a collection fails, the young generation is left in a
  // consistent state such that it can be collected by a
  // full collection.
  //   Before the collection
  //     Objects are in eden or from-space
  //     All roots into the young generation point into eden or from-space.
  //
  //   After a failed collection
  //     Objects may be in eden, from-space, or to-space
  //     An object A in eden or from-space may have a copy B
  //       in to-space.  If B exists, all roots that once pointed
  //       to A must now point to B.
  //     All objects in the young generation are unmarked.
  //     Eden, from-space, and to-space will all be collected by
  //       the full collection.
  void handle_promotion_failure(oop);

  // In the absence of promotion failure, we wouldn't look at "from-space"
  // objects after a young-gen collection.  When promotion fails, however,
  // the subsequent full collection will look at from-space objects:
  // therefore we must remove their forwarding pointers.
  void remove_forwarding_pointers();

  // Preserve the mark of "obj", if necessary, in preparation for its mark
  // word being overwritten with a self-forwarding-pointer.
  void   preserve_mark_if_necessary(oop obj, markOop m);

  // Together, these two stacks hold <object with a preserved mark, mark value>
  // pairs.  They should always contain the same number of elements.
  Stack<oop>     _objs_with_preserved_marks;
  Stack<markOop> _preserved_marks_of_objs;

  // Returns true if the collection can be attempted safely.  A return value
  // of false does not guarantee that a collection would fail, but if such a
  // collection did fail, the system might not be able to recover from it.
  bool collection_attempt_is_safe();

  // Promotion failure handling
  OopClosure* _promo_failure_scan_stack_closure;
  void set_promo_failure_scan_stack_closure(OopClosure* scan_stack_closure) {
    _promo_failure_scan_stack_closure = scan_stack_closure;
  }

  Stack<oop> _promo_failure_scan_stack;
  void drain_promo_failure_scan_stack(void);
  bool _promo_failure_drain_in_progress;

  // Performance Counters
  GenerationCounters*  _gen_counters;
  CSpaceCounters*      _eden_counters;
  CSpaceCounters*      _from_counters;
  CSpaceCounters*      _to_counters;

  // Sizing information
  size_t               _max_eden_size;
  size_t               _max_survivor_size;

  // Allocation support
  bool _should_allocate_from_space;
  bool should_allocate_from_space() const {
    return _should_allocate_from_space;
  }
  void clear_should_allocate_from_space() {
    _should_allocate_from_space = false;
  }
  void set_should_allocate_from_space() {
    _should_allocate_from_space = true;
  }

 protected:
  // Spaces
  EdenSpace*       _eden_space;
  ContiguousSpace* _from_space;
  ContiguousSpace* _to_space;

  enum SomeProtectedConstants {
    // Generations are GenGrain-aligned and have sizes that are multiples of
    // GenGrain.
    MinFreeScratchWords = 100
  };

  // Return the size of a survivor space if this generation were of size
  // gen_size.
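  // Eden is SurvivorRatio times the size of each survivor space and there are
  // two survivor spaces, so each survivor space gets gen_size / (SurvivorRatio + 2).
  // For example, with SurvivorRatio at its usual default of 8, a 10M generation
  // yields 10M / (8 + 2) = 1M per survivor space, rounded down to the requested
  // alignment.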
  size_t compute_survivor_size(size_t gen_size, size_t alignment) const {
    size_t n = gen_size / (SurvivorRatio + 2);
    return n > alignment ? align_size_down(n, alignment) : alignment;
  }

 public:  // was "protected" but caused compile error on win32
  class IsAliveClosure: public BoolObjectClosure {
    Generation* _g;
  public:
    IsAliveClosure(Generation* g);
    void do_object(oop p);
    bool do_object_b(oop p);
  };

  class KeepAliveClosure: public OopClosure {
  protected:
    ScanWeakRefClosure* _cl;
    CardTableRS* _rs;
    template <class T> void do_oop_work(T* p);
  public:
    KeepAliveClosure(ScanWeakRefClosure* cl);
    virtual void do_oop(oop* p);
    virtual void do_oop(narrowOop* p);
  };

  class FastKeepAliveClosure: public KeepAliveClosure {
  protected:
    HeapWord* _boundary;
    template <class T> void do_oop_work(T* p);
  public:
    FastKeepAliveClosure(DefNewGeneration* g, ScanWeakRefClosure* cl);
    virtual void do_oop(oop* p);
    virtual void do_oop(narrowOop* p);
  };

  class EvacuateFollowersClosure: public VoidClosure {
    GenCollectedHeap* _gch;
    int _level;
    ScanClosure* _scan_cur_or_nonheap;
    ScanClosure* _scan_older;
  public:
    EvacuateFollowersClosure(GenCollectedHeap* gch, int level,
                             ScanClosure* cur, ScanClosure* older);
    void do_void();
  };

  class FastEvacuateFollowersClosure: public VoidClosure {
    GenCollectedHeap* _gch;
    int _level;
    DefNewGeneration* _gen;
    FastScanClosure* _scan_cur_or_nonheap;
    FastScanClosure* _scan_older;
  public:
    FastEvacuateFollowersClosure(GenCollectedHeap* gch, int level,
                                 DefNewGeneration* gen,
                                 FastScanClosure* cur,
                                 FastScanClosure* older);
    void do_void();
  };

 public:
  DefNewGeneration(ReservedSpace rs, size_t initial_byte_size, int level,
                   const char* policy="Copy");

  virtual Generation::Name kind() { return Generation::DefNew; }

  // Accessing spaces
  EdenSpace*       eden() const           { return _eden_space; }
  ContiguousSpace* from() const           { return _from_space; }
  ContiguousSpace* to()   const           { return _to_space;   }

  virtual CompactibleSpace* first_compaction_space() const;

  // Space enquiries
  size_t capacity() const;
  size_t used() const;
  size_t free() const;
  size_t max_capacity() const;
  size_t capacity_before_gc() const;
  size_t unsafe_max_alloc_nogc() const;
  size_t contiguous_available() const;

  size_t max_eden_size() const              { return _max_eden_size; }
  size_t max_survivor_size() const          { return _max_survivor_size; }

  bool supports_inline_contig_alloc() const { return true; }
  HeapWord** top_addr() const;
  HeapWord** end_addr() const;

  // Thread-local allocation buffers
  bool supports_tlab_allocation() const { return true; }
  size_t tlab_capacity() const;
  size_t unsafe_max_tlab_alloc() const;

  // Grow the generation by the specified number of bytes.
  // The size in bytes is assumed to be properly aligned.
  // Return true if the expansion was successful.
  bool expand(size_t bytes);

  // DefNewGeneration cannot currently expand except at
  // a GC.
  virtual bool is_maximal_no_gc() const { return true; }

  // Iteration
  void object_iterate(ObjectClosure* blk);
  void object_iterate_since_last_GC(ObjectClosure* cl);

  void younger_refs_iterate(OopsInGenClosure* cl);

  void space_iterate(SpaceClosure* blk, bool usedOnly = false);

  // Allocation support
  virtual bool should_allocate(size_t word_size, bool is_tlab) {
    assert(UseTLAB || !is_tlab, "Should not allocate tlab");

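    // A request of overflow_limit words or more would overflow a size_t when
    // converted to bytes (overflow_limit words == 2^BitsPerSize_t bytes), so
    // such requests are rejected below.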
    size_t overflow_limit    = (size_t)1 << (BitsPerSize_t - LogHeapWordSize);

    const bool non_zero      = word_size > 0;
    const bool overflows     = word_size >= overflow_limit;
    const bool check_too_big = _pretenure_size_threshold_words > 0;
    const bool not_too_big   = word_size < _pretenure_size_threshold_words;
    const bool size_ok       = is_tlab || !check_too_big || not_too_big;

    bool result = !overflows &&
                  non_zero   &&
                  size_ok;

    return result;
  }
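  // For illustration, assuming a 64-bit VM (8-byte heap words) and
  // -XX:PretenureSizeThreshold=1m: _pretenure_size_threshold_words is then
  // 131072, so a non-TLAB request of 131072 words or more returns false here
  // and is left for an older generation to satisfy.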

  HeapWord* allocate(size_t word_size, bool is_tlab);
  HeapWord* allocate_from_space(size_t word_size);

  HeapWord* par_allocate(size_t word_size, bool is_tlab);

  // Prologue & Epilogue
  virtual void gc_prologue(bool full);
  virtual void gc_epilogue(bool full);

  // Save the tops for eden, from, and to
  virtual void record_spaces_top();

  // Doesn't require additional work during GC prologue and epilogue
  virtual bool performs_in_place_marking() const { return false; }

  // Accessing marks
  void save_marks();
  void reset_saved_marks();
  bool no_allocs_since_save_marks();

  // Need to declare the full complement of closures, whether we'll
  // override them or not, or get a message from the compiler:
  //   oop_since_save_marks_iterate_nv hides virtual function...
#define DefNew_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix) \
  void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl);

  ALL_SINCE_SAVE_MARKS_CLOSURES(DefNew_SINCE_SAVE_MARKS_DECL)

#undef DefNew_SINCE_SAVE_MARKS_DECL

  // For non-youngest collections, the DefNewGeneration can contribute
  // "to-space".
  virtual void contribute_scratch(ScratchBlock*& list, Generation* requestor,
                                  size_t max_alloc_words);

  // Reset for contribution of "to-space".
  virtual void reset_scratch();

  // GC support
  virtual void compute_new_size();
  virtual void collect(bool   full,
                       bool   clear_all_soft_refs,
                       size_t size,
                       bool   is_tlab);
  HeapWord* expand_and_allocate(size_t size,
                                bool is_tlab,
                                bool parallel = false);

  oop copy_to_survivor_space(oop old);
  int tenuring_threshold() { return _tenuring_threshold; }

  // Performance Counter support
  void update_counters();

  // Printing
  virtual const char* name() const;
  virtual const char* short_name() const { return "DefNew"; }

  bool must_be_youngest() const { return true; }
  bool must_be_oldest() const { return false; }

  // PrintHeapAtGC support.
  void print_on(outputStream* st) const;

  void verify(bool allow_dirty);

  bool promo_failure_scan_is_complete() const {
    return _promo_failure_scan_stack.is_empty();
  }

 protected:
  // If clear_space is true, clear the survivor spaces.  Eden is
  // cleared if the minimum size of eden is 0.  If mangle_space
  // is true, also mangle the space in debug mode.
  void compute_space_boundaries(uintx minimum_eden_size,
                                bool clear_space,
                                bool mangle_space);
  // Scavenge support
  void swap_spaces();
};