/*
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_MEMORY_TENUREDGENERATION_HPP
#define SHARE_VM_MEMORY_TENUREDGENERATION_HPP

#include "gc_implementation/shared/cSpaceCounters.hpp"
#include "gc_implementation/shared/gcStats.hpp"
#include "gc_implementation/shared/generationCounters.hpp"
#include "memory/cardGeneration.hpp"
#include "utilities/macros.hpp"

// TenuredGeneration models the portion of the heap that holds old
// (promoted/tenured) objects in a single contiguous space.
//
// Garbage collection is performed using mark-compact.
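//
// The generation inherits its card-table remembered set and block-offset
// table support from CardGeneration, so old-to-young pointers are found by
// scanning dirty cards (see younger_refs_iterate() below).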

class TenuredGeneration: public CardGeneration {
  friend class VMStructs;
  // Abstractly, this is a subtype that gets access to protected fields.
  friend class VM_PopulateDumpSharedSpace;

 protected:
  ContiguousSpace*  _the_space;       // actual space holding objects

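  // Performance counters for the generation and its space, updated via
  // update_counters() below.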
  GenerationCounters*   _gen_counters;
  CSpaceCounters*       _space_counters;

  // Grow generation with specified size (returns false if unable to grow)
  virtual bool grow_by(size_t bytes);
  // Grow generation to reserved size.
  virtual bool grow_to_reserved();
  // Shrink generation by the specified number of bytes
  void shrink_by(size_t bytes);

  // Allocation failure
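  // expand() attempts to grow the committed size by at least "bytes",
  // preferring "expand_bytes" when that is larger; shrink() uncommits space.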
  virtual bool expand(size_t bytes, size_t expand_bytes);
  void shrink(size_t bytes);

  // Accessing spaces
  ContiguousSpace* the_space() const { return _the_space; }

 public:
  TenuredGeneration(ReservedSpace rs, size_t initial_byte_size,
                    int level, GenRemSet* remset);

  Generation::Name kind() { return Generation::MarkSweepCompact; }

  // Printing
  const char* name() const { return "tenured generation"; }
  const char* short_name() const { return "Tenured"; }

  // Does a "full" (forced) collection invoked on this generation collect
  // the younger generations as well? Note that this is a hack that allows
  // the younger generations to be collected first (via a scavenge) when
  // ScavengeBeforeFullGC is set.
  virtual bool full_collects_younger_generations() const {
    return !ScavengeBeforeFullGC;
  }

  inline bool is_in(const void* p) const;

  // Space enquiries
  size_t capacity() const;
  size_t used() const;
  size_t free() const;

  MemRegion used_region() const;

  size_t unsafe_max_alloc_nogc() const;
  size_t contiguous_available() const;

  // Iteration
  void object_iterate(ObjectClosure* blk);
  void space_iterate(SpaceClosure* blk, bool usedOnly = false);

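  // Apply "blk" to all references in this generation that point into a
  // younger generation; the card table limits the scan to dirty cards.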
  void younger_refs_iterate(OopsInGenClosure* blk);

  inline CompactibleSpace* first_compaction_space() const;

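  // Allocation within the space; par_allocate() may be called concurrently by
  // multiple threads (it advances the space's top pointer atomically).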
  virtual inline HeapWord* allocate(size_t word_size, bool is_tlab);
  virtual inline HeapWord* par_allocate(size_t word_size, bool is_tlab);

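  // Declare oop_since_save_marks_iterate variants (one per specialized closure
  // type, plus a virtual "_v" form) that apply a closure to objects allocated
  // since the last save_marks(), avoiding virtual dispatch on the closure.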
#define TenuredGen_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix)     \
  void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl);
  TenuredGen_SINCE_SAVE_MARKS_DECL(OopsInGenClosure,_v)
  SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(TenuredGen_SINCE_SAVE_MARKS_DECL)

  void save_marks();
  void reset_saved_marks();
  bool no_allocs_since_save_marks();

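  // Block queries used when parsing the space: block_size() returns the size
  // in HeapWords of the block starting at "addr", and block_is_obj() tells
  // whether that block holds an initialized object.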
  inline size_t block_size(const HeapWord* addr) const;

  inline bool block_is_obj(const HeapWord* addr) const;

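  // Perform a (mark-compact) collection of this generation; "size" and
  // "is_tlab" describe the allocation request, if any, that prompted it.
  // expand_and_allocate() expands the generation, if necessary, to satisfy
  // an allocation of "size" words.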
  virtual void collect(bool full,
                       bool clear_all_soft_refs,
                       size_t size,
                       bool is_tlab);
  HeapWord* expand_and_allocate(size_t size,
                                bool is_tlab,
                                bool parallel = false);

  virtual void prepare_for_verify();

  virtual void gc_prologue(bool full);
  virtual void gc_epilogue(bool full);
  bool should_collect(bool   full,
                      size_t word_size,
                      bool   is_tlab);

  virtual void compute_new_size();

  // Performance Counter support
  void update_counters();

  virtual void record_spaces_top();

  // Statistics

  virtual void update_gc_stats(int level, bool full);

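  // Returns true if this generation can be expected to absorb a promotion of
  // up to "max_promoted_in_bytes" (based on the contiguous space available
  // and the average amount promoted in recent collections).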
  virtual bool promotion_attempt_is_safe(size_t max_promoted_in_bytes) const;

  virtual void verify();
  virtual void print_on(outputStream* st) const;
};

#endif // SHARE_VM_MEMORY_TENUREDGENERATION_HPP