1 /*
   2  * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_GC_SERIAL_DEFNEWGENERATION_HPP
  26 #define SHARE_GC_SERIAL_DEFNEWGENERATION_HPP
  27 
  28 #include "gc/serial/cSpaceCounters.hpp"
  29 #include "gc/shared/ageTable.hpp"
  30 #include "gc/shared/copyFailedInfo.hpp"
  31 #include "gc/shared/generation.hpp"
  32 #include "gc/shared/generationCounters.hpp"
  33 #include "gc/shared/preservedMarks.hpp"
  34 #include "utilities/align.hpp"
  35 #include "utilities/stack.hpp"
  36 
  37 class ContiguousSpace;
  38 class CSpaceCounters;
  39 class DefNewYoungerGenClosure;
  40 class DefNewScanClosure;
  41 class ScanWeakRefClosure;
  42 class SerialHeap;
  43 class STWGCTimer;
  44 
  45 // DefNewGeneration is a young generation containing eden, from- and
  46 // to-space.
  47 
class DefNewGeneration: public Generation {
  friend class VMStructs;   // VMStructs exposes field layout; do not reorder members casually.

protected:
  Generation* _old_gen;              // Generation that objects are promoted into.
  uint        _tenuring_threshold;   // Tenuring threshold for next collection.
  AgeTable    _age_table;            // Per-age survivor occupancy, input to threshold adjustment.
  // Size of object to pretenure in words; command line provides bytes
  size_t      _pretenure_size_threshold_words;

  AgeTable*   age_table() { return &_age_table; }

  // Initialize state to optimistically assume no promotion failure will
  // happen.
  void   init_assuming_no_promotion_failure();
  // True iff a promotion has failed in the current collection.
  bool   _promotion_failed;
  bool   promotion_failed() { return _promotion_failed; }
  PromotionFailedInfo _promotion_failed_info;   // Details (count/size) about the failed promotions, if any.

  // Handling promotion failure.  A young generation collection
  // can fail if a live object cannot be copied out of its
  // location in eden or from-space during the collection.  If
  // a collection fails, the young generation is left in a
  // consistent state such that it can be collected by a
  // full collection.
  //   Before the collection
  //     Objects are in eden or from-space
  //     All roots into the young generation point into eden or from-space.
  //
  //   After a failed collection
  //     Objects may be in eden, from-space, or to-space
  //     An object A in eden or from-space may have a copy B
  //       in to-space.  If B exists, all roots that once pointed
  //       to A must now point to B.
  //     All objects in the young generation are unmarked.
  //     Eden, from-space, and to-space will all be collected by
  //       the full collection.
  void handle_promotion_failure(oop);

  // In the absence of promotion failure, we wouldn't look at "from-space"
  // objects after a young-gen collection.  When promotion fails, however,
  // the subsequent full collection will look at from-space objects:
  // therefore we must remove their forwarding pointers.
  void remove_forwarding_pointers();

  // Restore the object marks saved in _preserved_marks_set (headers were
  // overwritten by forwarding pointers during the failed copy).
  virtual void restore_preserved_marks();

  // Preserved marks
  PreservedMarksSet _preserved_marks_set;

  // Promotion failure handling
  OopIterateClosure *_promo_failure_scan_stack_closure;
  void set_promo_failure_scan_stack_closure(OopIterateClosure *scan_stack_closure) {
    _promo_failure_scan_stack_closure = scan_stack_closure;
  }

  // Objects whose promotion failed and still need scanning.
  Stack<oop, mtGC> _promo_failure_scan_stack;
  void drain_promo_failure_scan_stack(void);
  // Guards against re-entering the drain loop recursively.
  bool _promo_failure_drain_in_progress;

  // Performance Counters
  GenerationCounters*  _gen_counters;
  CSpaceCounters*      _eden_counters;
  CSpaceCounters*      _from_counters;
  CSpaceCounters*      _to_counters;

  // sizing information
  size_t               _max_eden_size;
  size_t               _max_survivor_size;

  // Allocation support
  // When true, mutator allocation may fall back to from-space (used after
  // certain failure scenarios); cleared/set via the helpers below.
  bool _should_allocate_from_space;
  bool should_allocate_from_space() const {
    return _should_allocate_from_space;
  }
  void clear_should_allocate_from_space() {
    _should_allocate_from_space = false;
  }
  void set_should_allocate_from_space() {
    _should_allocate_from_space = true;
  }

  // Tenuring
  void adjust_desired_tenuring_threshold();

  // Spaces: eden for new allocations, from/to as the survivor pair that is
  // swapped by swap_spaces() after each scavenge.
  ContiguousSpace* _eden_space;
  ContiguousSpace* _from_space;
  ContiguousSpace* _to_space;

  STWGCTimer* _gc_timer;   // Timer for the stop-the-world young collections.

  enum SomeProtectedConstants {
    // NOTE(review): minimum free words presumably required before this
    // generation offers to-space as scratch (see contribute_scratch());
    // confirm against the .cpp.  The previous comment here ("Generations
    // are GenGrain-aligned...") described generations, not this constant.
    MinFreeScratchWords = 100
  };

  // Return the size of a survivor space if this generation were of size
  // gen_size.
  size_t compute_survivor_size(size_t gen_size, size_t alignment) const {
    // eden : from : to = SurvivorRatio : 1 : 1, so each survivor space gets
    // gen_size / (SurvivorRatio + 2) words, aligned down but never less
    // than one alignment unit.
    size_t n = gen_size / (SurvivorRatio + 2);
    return n > alignment ? align_down(n, alignment) : alignment;
  }

 public:  // was "protected" but caused compile error on win32
  // Liveness predicate used during reference processing.
  class IsAliveClosure: public BoolObjectClosure {
    Generation* _young_gen;
  public:
    IsAliveClosure(Generation* young_gen);
    bool do_object_b(oop p);
  };

  // Keeps referents alive by applying the scan closure and dirtying cards
  // via the CardTableRS as needed.
  class KeepAliveClosure: public OopClosure {
  protected:
    ScanWeakRefClosure* _cl;
    CardTableRS* _rs;
    template <class T> void do_oop_work(T* p);
  public:
    KeepAliveClosure(ScanWeakRefClosure* cl);
    virtual void do_oop(oop* p);
    virtual void do_oop(narrowOop* p);
  };

  // Variant of KeepAliveClosure that additionally filters on a boundary
  // address (see _boundary; semantics defined in the .cpp).
  class FastKeepAliveClosure: public KeepAliveClosure {
  protected:
    HeapWord* _boundary;
    template <class T> void do_oop_work(T* p);
  public:
    FastKeepAliveClosure(DefNewGeneration* g, ScanWeakRefClosure* cl);
    virtual void do_oop(oop* p);
    virtual void do_oop(narrowOop* p);
  };

  // Evacuates transitively-reachable objects by repeatedly applying the
  // two scan closures until no work remains.
  class FastEvacuateFollowersClosure: public VoidClosure {
    SerialHeap* _heap;
    DefNewScanClosure* _scan_cur_or_nonheap;
    DefNewYoungerGenClosure* _scan_older;
  public:
    FastEvacuateFollowersClosure(SerialHeap* heap,
                                 DefNewScanClosure* cur,
                                 DefNewYoungerGenClosure* older);
    void do_void();
  };

 public:
  DefNewGeneration(ReservedSpace rs,
                   size_t initial_byte_size,
                   size_t min_byte_size,
                   size_t max_byte_size,
                   const char* policy="Serial young collection pauses");

  virtual void ref_processor_init();

  virtual Generation::Name kind() { return Generation::DefNew; }

  // Accessing spaces
  ContiguousSpace* eden() const           { return _eden_space; }
  ContiguousSpace* from() const           { return _from_space; }
  ContiguousSpace* to()   const           { return _to_space;   }

  virtual CompactibleSpace* first_compaction_space() const;

  // Space enquiries
  size_t capacity() const;
  size_t used() const;
  size_t free() const;
  size_t max_capacity() const;
  size_t capacity_before_gc() const;
  size_t unsafe_max_alloc_nogc() const;
  size_t contiguous_available() const;

  size_t max_eden_size() const              { return _max_eden_size; }
  size_t max_survivor_size() const          { return _max_survivor_size; }

  // Inline contiguous (pointer-bump) allocation support.
  bool supports_inline_contig_alloc() const { return true; }
  HeapWord* volatile* top_addr() const;
  HeapWord** end_addr() const;

  // Thread-local allocation buffers
  bool supports_tlab_allocation() const { return true; }
  size_t tlab_capacity() const;
  size_t tlab_used() const;
  size_t unsafe_max_tlab_alloc() const;

  // Grow the generation by the specified number of bytes.
  // The size of bytes is assumed to be properly aligned.
  // Return true if the expansion was successful.
  bool expand(size_t bytes);

  // DefNewGeneration cannot currently expand except at
  // a GC.
  virtual bool is_maximal_no_gc() const { return true; }

  // Iteration
  void object_iterate(ObjectClosure* blk);

  void space_iterate(SpaceClosure* blk, bool usedOnly = false);

  // Allocation support
  // Returns true iff an allocation of word_size should be attempted here:
  // size must be non-zero, must not overflow when converted to bytes, and
  // (for non-TLAB requests) must be below the pretenure threshold.
  virtual bool should_allocate(size_t word_size, bool is_tlab) {
    assert(UseTLAB || !is_tlab, "Should not allocate tlab");

    // Largest word count whose byte size still fits in a size_t.
    size_t overflow_limit    = (size_t)1 << (BitsPerSize_t - LogHeapWordSize);

    const bool non_zero      = word_size > 0;
    const bool overflows     = word_size >= overflow_limit;
    const bool check_too_big = _pretenure_size_threshold_words > 0;
    const bool not_too_big   = word_size < _pretenure_size_threshold_words;
    const bool size_ok       = is_tlab || !check_too_big || not_too_big;

    bool result = !overflows &&
                  non_zero   &&
                  size_ok;

    return result;
  }

  HeapWord* allocate(size_t word_size, bool is_tlab);
  HeapWord* allocate_from_space(size_t word_size);

  HeapWord* par_allocate(size_t word_size, bool is_tlab);

  virtual void gc_epilogue(bool full);

  // Save the tops for eden, from, and to
  virtual void record_spaces_top();

  // Accessing marks
  void save_marks();
  void reset_saved_marks();
  bool no_allocs_since_save_marks();

  // Need to declare the full complement of closures, whether we'll
  // override them or not, or get message from the compiler:
  //   oop_since_save_marks_iterate_nv hides virtual function...
  template <typename OopClosureType>
  void oop_since_save_marks_iterate(OopClosureType* cl);

  // For non-youngest collection, the DefNewGeneration can contribute
  // "to-space".
  virtual void contribute_scratch(ScratchBlock*& list, Generation* requestor,
                          size_t max_alloc_words);

  // Reset for contribution of "to-space".
  virtual void reset_scratch();

  // GC support
  virtual void compute_new_size();

  // Returns true if the collection is likely to be safely
  // completed. Even if this method returns true, a collection
  // may not be guaranteed to succeed, and the system should be
  // able to safely unwind and recover from that failure, albeit
  // at some additional cost. Override superclass's implementation.
  virtual bool collection_attempt_is_safe();

  virtual void collect(bool   full,
                       bool   clear_all_soft_refs,
                       size_t size,
                       bool   is_tlab);
  HeapWord* expand_and_allocate(size_t size,
                                bool is_tlab,
                                bool parallel = false);

  // Copy a live object into to-space (or promote it); core of the scavenge.
  oop copy_to_survivor_space(oop old);
  uint tenuring_threshold() { return _tenuring_threshold; }

  // Performance Counter support
  void update_counters();

  // Printing
  virtual const char* name() const;
  virtual const char* short_name() const { return "DefNew"; }

  void print_on(outputStream* st) const;

  void verify();

  bool promo_failure_scan_is_complete() const {
    return _promo_failure_scan_stack.is_empty();
  }

 protected:
  // If clear_space is true, clear the survivor spaces.  Eden is
  // cleared if the minimum size of eden is 0.  If mangle_space
  // is true, also mangle the space in debug mode.
  void compute_space_boundaries(uintx minimum_eden_size,
                                bool clear_space,
                                bool mangle_space);

  // Return adjusted new size for NewSizeThreadIncrease.
  // If any overflow happens, revert to previous new size.
  size_t adjust_for_thread_increase(size_t new_size_candidate,
                                    size_t new_size_before,
                                    size_t alignment) const;


  // Scavenge support
  void swap_spaces();
};
 350 
 351 #endif // SHARE_GC_SERIAL_DEFNEWGENERATION_HPP