/*
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_MEMORY_DEFNEWGENERATION_HPP
#define SHARE_VM_MEMORY_DEFNEWGENERATION_HPP

#include "gc_implementation/shared/ageTable.hpp"
#include "gc_implementation/shared/cSpaceCounters.hpp"
#include "gc_implementation/shared/generationCounters.hpp"
#include "gc_implementation/shared/copyFailedInfo.hpp"
#include "utilities/stack.hpp"

class ContiguousSpace;
class ScanClosure;
class STWGCTimer;

// DefNewGeneration is a young generation containing eden, from- and
// to-space.

class DefNewGeneration: public Generation {
  friend class VMStructs;

protected:
  Generation* _next_gen;
  uint        _tenuring_threshold;   // Tenuring threshold for next collection.
  ageTable    _age_table;
  // Size threshold (in words) at or above which objects are pretenured;
  // the command line supplies the value in bytes.
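  // (Presumably the word-size form of the PretenureSizeThreshold flag; a
  // value of zero disables the size check in should_allocate() below.)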
  size_t      _pretenure_size_threshold_words;

  ageTable*   age_table() { return &_age_table; }

  // Initialize state to optimistically assume no promotion failure will
  // happen.
  void   init_assuming_no_promotion_failure();
  // True iff a promotion has failed in the current collection.
  bool   _promotion_failed;
  bool   promotion_failed() { return _promotion_failed; }
  PromotionFailedInfo _promotion_failed_info;

  // Handling promotion failure.  A young generation collection
  // can fail if a live object cannot be copied out of its
  // location in eden or from-space during the collection.  If
  // a collection fails, the young generation is left in a
  // consistent state such that it can be collected by a
  // full collection.
  //   Before the collection
  //     Objects are in eden or from-space
  //     All roots into the young generation point into eden or from-space.
  //
  //   After a failed collection
  //     Objects may be in eden, from-space, or to-space
  //     An object A in eden or from-space may have a copy B
  //       in to-space.  If B exists, all roots that once pointed
  //       to A must now point to B.
  //     All objects in the young generation are unmarked.
  //     Eden, from-space, and to-space will all be collected by
  //       the full collection.
  void handle_promotion_failure(oop);

  // In the absence of promotion failure, we wouldn't look at "from-space"
  // objects after a young-gen collection.  When promotion fails, however,
  // the subsequent full collection will look at from-space objects:
  // therefore we must remove their forwarding pointers.
  void remove_forwarding_pointers();

  // Preserve the mark of "obj", if necessary, in preparation for its mark
  // word being overwritten with a self-forwarding-pointer.
  void   preserve_mark_if_necessary(oop obj, markOop m);
  void   preserve_mark(oop obj, markOop m);    // work routine used by the above

  // Together, these keep <object with a preserved mark, mark value> pairs.
  // They should always contain the same number of elements.
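  // (A hedged note: the stacks appear to be pushed in lock step as mark
  // words are about to be overwritten, and popped in lock step when the
  // original marks are restored, e.g. by remove_forwarding_pointers().)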
  Stack<oop, mtGC>     _objs_with_preserved_marks;
  Stack<markOop, mtGC> _preserved_marks_of_objs;

  // Promotion failure handling
  ExtendedOopClosure *_promo_failure_scan_stack_closure;
  void set_promo_failure_scan_stack_closure(ExtendedOopClosure *scan_stack_closure) {
    _promo_failure_scan_stack_closure = scan_stack_closure;
  }

  Stack<oop, mtGC> _promo_failure_scan_stack;
  void drain_promo_failure_scan_stack(void);
  bool _promo_failure_drain_in_progress;

  // Performance Counters
  GenerationCounters*  _gen_counters;
  CSpaceCounters*      _eden_counters;
  CSpaceCounters*      _from_counters;
  CSpaceCounters*      _to_counters;

  // Sizing information
  size_t               _max_eden_size;
  size_t               _max_survivor_size;

  // Allocation support
  bool _should_allocate_from_space;
  bool should_allocate_from_space() const {
    return _should_allocate_from_space;
  }
  void clear_should_allocate_from_space() {
    _should_allocate_from_space = false;
  }
  void set_should_allocate_from_space() {
    _should_allocate_from_space = true;
  }
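  // (A hedged note: allocating from from-space is a last-resort path; the
  // flag is presumably set after an unsuccessful collection, e.g. in
  // gc_epilogue(), and cleared once a full collection has restored a
  // consistent heap, so allocate_from_space() refuses requests otherwise.)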

  // Tenuring
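  // (Presumably recomputes _tenuring_threshold from the survivor age table
  // after a scavenge; compare ageTable::compute_tenuring_threshold().)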
  void adjust_desired_tenuring_threshold();

  // Spaces
  ContiguousSpace* _eden_space;
  ContiguousSpace* _from_space;
  ContiguousSpace* _to_space;

  STWGCTimer* _gc_timer;

  enum SomeProtectedConstants {
    // Generations are GenGrain-aligned and have sizes that are multiples of
    // GenGrain.
    // MinFreeScratchWords is the smallest number of free words worth
    // offering as scratch space via contribute_scratch().
    MinFreeScratchWords = 100
  };

  // Return the size of a survivor space if this generation were of size
  // gen_size.
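  // A worked example, assuming the default SurvivorRatio of 8: the
  // generation divides into SurvivorRatio + 2 = 10 notional parts, with
  // one part for each survivor space, so a 10M generation yields 1M
  // survivor spaces, rounded down to "alignment" but never below it.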
  size_t compute_survivor_size(size_t gen_size, size_t alignment) const {
    size_t n = gen_size / (SurvivorRatio + 2);
    return n > alignment ? align_size_down(n, alignment) : alignment;
  }

 public:  // was "protected" but caused compile error on win32
  class IsAliveClosure: public BoolObjectClosure {
    Generation* _g;
  public:
    IsAliveClosure(Generation* g);
    bool do_object_b(oop p);
  };

  class KeepAliveClosure: public OopClosure {
  protected:
    ScanWeakRefClosure* _cl;
    CardTableRS* _rs;
    template <class T> void do_oop_work(T* p);
  public:
    KeepAliveClosure(ScanWeakRefClosure* cl);
    virtual void do_oop(oop* p);
    virtual void do_oop(narrowOop* p);
  };

  class FastKeepAliveClosure: public KeepAliveClosure {
  protected:
    HeapWord* _boundary;
    template <class T> void do_oop_work(T* p);
  public:
    FastKeepAliveClosure(DefNewGeneration* g, ScanWeakRefClosure* cl);
    virtual void do_oop(oop* p);
    virtual void do_oop(narrowOop* p);
  };

  class EvacuateFollowersClosure: public VoidClosure {
    GenCollectedHeap* _gch;
    int _level;
    ScanClosure* _scan_cur_or_nonheap;
    ScanClosure* _scan_older;
  public:
    EvacuateFollowersClosure(GenCollectedHeap* gch, int level,
                             ScanClosure* cur, ScanClosure* older);
    void do_void();
  };

  class FastEvacuateFollowersClosure: public VoidClosure {
    GenCollectedHeap* _gch;
    int _level;
    DefNewGeneration* _gen;
    FastScanClosure* _scan_cur_or_nonheap;
    FastScanClosure* _scan_older;
  public:
    FastEvacuateFollowersClosure(GenCollectedHeap* gch, int level,
                                 DefNewGeneration* gen,
                                 FastScanClosure* cur,
                                 FastScanClosure* older);
    void do_void();
  };

 public:
  DefNewGeneration(ReservedSpace rs, size_t initial_byte_size, int level,
                   const char* policy="Copy");

  virtual void ref_processor_init();

  virtual Generation::Name kind() { return Generation::DefNew; }

  // Accessing spaces
  ContiguousSpace* eden() const           { return _eden_space; }
  ContiguousSpace* from() const           { return _from_space; }
  ContiguousSpace* to()   const           { return _to_space;   }

  virtual CompactibleSpace* first_compaction_space() const;

  // Space enquiries
  size_t capacity() const;
  size_t used() const;
  size_t free() const;
  size_t max_capacity() const;
  size_t capacity_before_gc() const;
  size_t unsafe_max_alloc_nogc() const;
  size_t contiguous_available() const;

  size_t max_eden_size() const              { return _max_eden_size; }
  size_t max_survivor_size() const          { return _max_survivor_size; }

  bool supports_inline_contig_alloc() const { return true; }
  HeapWord** top_addr() const;
  HeapWord** end_addr() const;
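  // (A hedged note: exposing these addresses is what makes inline
  // contiguous allocation possible; compiled code and interpreter stubs
  // can bump-allocate in eden directly against its top and end pointers.)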

  // Thread-local allocation buffers
  bool supports_tlab_allocation() const { return true; }
  size_t tlab_capacity() const;
  size_t tlab_used() const;
  size_t unsafe_max_tlab_alloc() const;

  // Grow the generation by the specified number of bytes.
  // "bytes" is assumed to be properly aligned.
  // Return true if the expansion was successful.
  bool expand(size_t bytes);

  // DefNewGeneration cannot currently expand except at
  // a GC.
  virtual bool is_maximal_no_gc() const { return true; }

  // Iteration
  void object_iterate(ObjectClosure* blk);

  void younger_refs_iterate(OopsInGenClosure* cl);

  void space_iterate(SpaceClosure* blk, bool usedOnly = false);

  // Allocation support
  virtual bool should_allocate(size_t word_size, bool is_tlab) {
    assert(UseTLAB || !is_tlab, "Should not allocate tlab");

    // Any request of at least this many words would overflow a size_t
    // when converted to a byte count.
    size_t overflow_limit    = (size_t)1 << (BitsPerSize_t - LogHeapWordSize);

    const bool non_zero      = word_size > 0;
    const bool overflows     = word_size >= overflow_limit;
    const bool check_too_big = _pretenure_size_threshold_words > 0;
    const bool not_too_big   = word_size < _pretenure_size_threshold_words;
    const bool size_ok       = is_tlab || !check_too_big || not_too_big;

    bool result = !overflows &&
                  non_zero   &&
                  size_ok;

    return result;
  }
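  // (An illustrative example: with -XX:PretenureSizeThreshold=1m on a
  // 64-bit VM, _pretenure_size_threshold_words is 131072, so a 2MB
  // non-TLAB request fails this check and the collector policy would
  // direct it to an older generation instead.)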

  HeapWord* allocate(size_t word_size, bool is_tlab);
  HeapWord* allocate_from_space(size_t word_size);

  HeapWord* par_allocate(size_t word_size, bool is_tlab);

  virtual void gc_epilogue(bool full);

  // Save the tops for eden, from, and to
  virtual void record_spaces_top();

  // Doesn't require additional work during GC prologue and epilogue
  virtual bool performs_in_place_marking() const { return false; }

  // Accessing marks
  void save_marks();
  void reset_saved_marks();
  bool no_allocs_since_save_marks();

  // Need to declare the full complement of closures, whether we'll
  // override them or not, or we get a message from the compiler:
  //   oop_since_save_marks_iterate_nv hides virtual function...
#define DefNew_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix) \
  void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl);

  ALL_SINCE_SAVE_MARKS_CLOSURES(DefNew_SINCE_SAVE_MARKS_DECL)

#undef DefNew_SINCE_SAVE_MARKS_DECL
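  // (For illustration, assuming ScanClosure is among the closure types
  // covered by ALL_SINCE_SAVE_MARKS_CLOSURES, one generated declaration
  // would be:
  //   void oop_since_save_marks_iterate_nv(ScanClosure* cl);)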

  // For a non-youngest collection, the DefNewGeneration can contribute
  // "to-space".
  virtual void contribute_scratch(ScratchBlock*& list, Generation* requestor,
                          size_t max_alloc_words);

  // Reset for contribution of "to-space".
  virtual void reset_scratch();

  // GC support
  virtual void compute_new_size();

  // Returns true if the collection is likely to complete safely.  Even if
  // this method returns true, the collection is not guaranteed to succeed,
  // and the system should be able to safely unwind and recover from a
  // failure, albeit at some additional cost.  Overrides the superclass's
  // implementation.
  virtual bool collection_attempt_is_safe();

  virtual void collect(bool   full,
                       bool   clear_all_soft_refs,
                       size_t size,
                       bool   is_tlab);
  HeapWord* expand_and_allocate(size_t size,
                                bool is_tlab,
                                bool parallel = false);

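  // (A hedged summary of the copying step below: "old" is copied into
  // to-space while its age is below the tenuring threshold, and promoted
  // to the next generation otherwise or when to-space is full; if even
  // promotion fails, handle_promotion_failure() leaves it self-forwarded.)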
  oop copy_to_survivor_space(oop old);
  uint tenuring_threshold() { return _tenuring_threshold; }

  // Performance Counter support
  void update_counters();

  // Printing
  virtual const char* name() const;
  virtual const char* short_name() const { return "DefNew"; }

  // PrintHeapAtGC support.
  void print_on(outputStream* st) const;

  void verify();

  bool promo_failure_scan_is_complete() const {
    return _promo_failure_scan_stack.is_empty();
  }

 protected:
  // If clear_space is true, clear the survivor spaces.  Eden is
  // cleared if the minimum size of eden is 0.  If mangle_space
  // is true, also mangle the space in debug mode.
  void compute_space_boundaries(uintx minimum_eden_size,
                                bool clear_space,
                                bool mangle_space);
  // Scavenge support
  void swap_spaces();
};

#endif // SHARE_VM_MEMORY_DEFNEWGENERATION_HPP