/*
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_MEMORY_COLLECTORPOLICY_HPP
#define SHARE_VM_MEMORY_COLLECTORPOLICY_HPP

#include "memory/allocation.hpp"
#include "memory/barrierSet.hpp"
#include "memory/generationSpec.hpp"
#include "memory/genRemSet.hpp"
#include "utilities/macros.hpp"

// This class (or, more correctly, its subtypes) is used
// to define global garbage collector attributes.
// This includes initialization of generations and any other
// shared resources they may need.
//
// In general, all flag adjustment and validation should be
// done in initialize_flags(), which is called prior to
// initialize_size_info().
//
// This class is not fully developed yet. As more collectors
// are added, it is expected that we will come across further
// behavior that requires global attention. The correct place
// to deal with those issues is this class.

// Forward declarations.
class GenCollectorPolicy;
class TwoGenerationCollectorPolicy;
class AdaptiveSizePolicy;
#if INCLUDE_ALL_GCS
class ConcurrentMarkSweepPolicy;
class G1CollectorPolicy;
#endif // INCLUDE_ALL_GCS

class GCPolicyCounters;
class MarkSweepPolicy;

class CollectorPolicy : public CHeapObj<mtGC> {
 protected:
  GCPolicyCounters* _gc_policy_counters;

  // Requires that the concrete subclass sets the alignment constraints
  // before calling.
  virtual void initialize_flags();
  virtual void initialize_size_info();

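  // Requested heap sizing bounds, in bytes, derived from the heap sizing flags.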
  size_t _initial_heap_byte_size;
  size_t _max_heap_byte_size;
  size_t _min_heap_byte_size;

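  // Alignment constraints for heap and generation sizing; set by the
  // concrete subclass before initialize_flags() is called.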
  size_t _min_alignment;
  size_t _max_alignment;

  // The sizing of the heap is controlled by a sizing policy.
  AdaptiveSizePolicy* _size_policy;

  // Set to true when policy wants soft refs cleared.
  // Reset to false by gc after it clears all soft refs.
  bool _should_clear_all_soft_refs;
  // Set to true by the GC whenever a collection clears all soft refs, and
  // reset to false each time the gc returns control to the mutator.  For
  // example, in the ParallelScavengeHeap case the latter is done toward the
  // end of mem_allocate(), where it returns op.result().
  bool _all_soft_refs_clear;

  CollectorPolicy() :
    _min_alignment(1),
    _max_alignment(1),
    _initial_heap_byte_size(0),
    _max_heap_byte_size(0),
    _min_heap_byte_size(0),
    _size_policy(NULL),
    _should_clear_all_soft_refs(false),
    _all_soft_refs_clear(false)
  {}

 public:
  void set_min_alignment(size_t align)         { _min_alignment = align; }
  size_t min_alignment()                       { return _min_alignment; }
  void set_max_alignment(size_t align)         { _max_alignment = align; }
  size_t max_alignment()                       { return _max_alignment; }

  size_t initial_heap_byte_size() { return _initial_heap_byte_size; }
  void set_initial_heap_byte_size(size_t v) { _initial_heap_byte_size = v; }
  size_t max_heap_byte_size()     { return _max_heap_byte_size; }
  void set_max_heap_byte_size(size_t v) { _max_heap_byte_size = v; }
  size_t min_heap_byte_size()     { return _min_heap_byte_size; }
  void set_min_heap_byte_size(size_t v) { _min_heap_byte_size = v; }

  enum Name {
    CollectorPolicyKind,
    TwoGenerationCollectorPolicyKind,
    ConcurrentMarkSweepPolicyKind,
    ASConcurrentMarkSweepPolicyKind,
    G1CollectorPolicyKind
  };

  AdaptiveSizePolicy* size_policy() { return _size_policy; }
  bool should_clear_all_soft_refs() { return _should_clear_all_soft_refs; }
  void set_should_clear_all_soft_refs(bool v) { _should_clear_all_soft_refs = v; }
  // Returns the current value of _should_clear_all_soft_refs.
  // _should_clear_all_soft_refs is set to false as a side effect.
  bool use_should_clear_all_soft_refs(bool v);
  bool all_soft_refs_clear() { return _all_soft_refs_clear; }
  void set_all_soft_refs_clear(bool v) { _all_soft_refs_clear = v; }

  // Called by the GC after soft refs have been cleared to indicate
  // that the request in _should_clear_all_soft_refs has been fulfilled.
  void cleared_all_soft_refs();
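  // (See the ClearedAllSoftRefs helper below, whose destructor calls this
  // when soft refs were cleared.)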

  // Identification methods.
  virtual GenCollectorPolicy*           as_generation_policy()            { return NULL; }
  virtual TwoGenerationCollectorPolicy* as_two_generation_policy()        { return NULL; }
  virtual MarkSweepPolicy*              as_mark_sweep_policy()            { return NULL; }
#if INCLUDE_ALL_GCS
  virtual ConcurrentMarkSweepPolicy*    as_concurrent_mark_sweep_policy() { return NULL; }
  virtual G1CollectorPolicy*            as_g1_policy()                    { return NULL; }
#endif // INCLUDE_ALL_GCS
  // Note that these are not virtual.
  bool is_generation_policy()            { return as_generation_policy() != NULL; }
  bool is_two_generation_policy()        { return as_two_generation_policy() != NULL; }
  bool is_mark_sweep_policy()            { return as_mark_sweep_policy() != NULL; }
#if INCLUDE_ALL_GCS
  bool is_concurrent_mark_sweep_policy() { return as_concurrent_mark_sweep_policy() != NULL; }
  bool is_g1_policy()                    { return as_g1_policy() != NULL; }
#else  // INCLUDE_ALL_GCS
  bool is_concurrent_mark_sweep_policy() { return false; }
  bool is_g1_policy()                    { return false; }
#endif // INCLUDE_ALL_GCS

  virtual BarrierSet::Name barrier_set_name() = 0;
  virtual GenRemSet::Name  rem_set_name() = 0;

  // Create the remembered set (to cover the given reserved region,
  // allowing breaking up into at most "max_covered_regions").
  virtual GenRemSet* create_rem_set(MemRegion reserved,
                                    int max_covered_regions);

  // This method controls how a collector satisfies a request
  // for a block of memory.  "gc_overhead_limit_was_exceeded" will
  // be set to true if the adaptive size policy determines that
  // an excessive amount of time is being spent doing collections
  // and has caused a NULL to be returned.  If a NULL is not returned,
  // "gc_overhead_limit_was_exceeded" has an undefined meaning.
  virtual HeapWord* mem_allocate_work(size_t size,
                                      bool is_tlab,
                                      bool* gc_overhead_limit_was_exceeded) = 0;

  // This method controls how a collector handles one or more
  // of its generations being fully allocated.
  virtual HeapWord *satisfy_failed_allocation(size_t size, bool is_tlab) = 0;
  // This method controls how a collector handles a metadata allocation
  // failure.
  virtual MetaWord* satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
                                                       size_t size,
                                                       Metaspace::MetadataType mdtype);

  // Performance counter support.
  GCPolicyCounters* counters()     { return _gc_policy_counters; }

  // Create the jstat counters for the GC policy.  By default, policies
  // don't have associated counters, and we complain if this is invoked.
  virtual void initialize_gc_policy_counters() {
    ShouldNotReachHere();
  }

  virtual CollectorPolicy::Name kind() {
    return CollectorPolicy::CollectorPolicyKind;
  }

  // Returns true if the collector has an eden space with a soft end.
  virtual bool has_soft_ended_eden() {
    return false;
  }

};

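// ClearedAllSoftRefs is a stack-allocated (RAII) helper: when it goes out of
// scope it calls cleared_all_soft_refs() on the policy if the collection it
// brackets cleared all soft refs.  An illustrative usage sketch (names are
// hypothetical, not from a particular collector):
//
//   {
//     ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());
//     // ... perform the collection ...
//   } // destructor reports the cleared soft refs to the policy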
class ClearedAllSoftRefs : public StackObj {
  bool _clear_all_soft_refs;
  CollectorPolicy* _collector_policy;
 public:
  ClearedAllSoftRefs(bool clear_all_soft_refs,
                     CollectorPolicy* collector_policy) :
    _clear_all_soft_refs(clear_all_soft_refs),
    _collector_policy(collector_policy) {}

  ~ClearedAllSoftRefs() {
    if (_clear_all_soft_refs) {
      _collector_policy->cleared_all_soft_refs();
    }
  }
};

class GenCollectorPolicy : public CollectorPolicy {
 protected:
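  // Sizes of the young generation (gen 0), in bytes.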
  size_t _min_gen0_size;
  size_t _initial_gen0_size;
  size_t _max_gen0_size;

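  // Specifications for the generations, set up by the concrete policy's
  // initialize_generations().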
  GenerationSpec **_generations;

  // Return true if an allocation should be attempted in the older
  // generation if it fails in the younger generation.  Return
  // false otherwise.
  virtual bool should_try_older_generation_allocation(size_t word_size) const;

  void initialize_flags();
  void initialize_size_info();

  // Try to allocate space by expanding the heap.
  virtual HeapWord* expand_heap_and_allocate(size_t size, bool is_tlab);

  // Compute the maximum heap alignment.
  size_t compute_max_alignment();

  // Scale the base_size by NewRatio according to
  //     result = base_size / (NewRatio + 1)
  // and align by min_alignment().
  size_t scale_by_NewRatio_aligned(size_t base_size);
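  // For example (illustrative): with -XX:NewRatio=2, a base_size of 99 MB
  // scales to 99 MB / 3 = 33 MB, which is then adjusted to a multiple of
  // min_alignment().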

  // Bound the value by the given maximum minus the
  // min_alignment.
  size_t bound_minus_alignment(size_t desired_size, size_t maximum_size);
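  // (Roughly: MIN2(desired_size, maximum_size - min_alignment()).)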

 public:
  // Accessors
  size_t min_gen0_size() { return _min_gen0_size; }
  void set_min_gen0_size(size_t v) { _min_gen0_size = v; }
  size_t initial_gen0_size() { return _initial_gen0_size; }
  void set_initial_gen0_size(size_t v) { _initial_gen0_size = v; }
  size_t max_gen0_size() { return _max_gen0_size; }
  void set_max_gen0_size(size_t v) { _max_gen0_size = v; }

  virtual int number_of_generations() = 0;

  virtual GenerationSpec **generations()       {
    assert(_generations != NULL, "Sanity check");
    return _generations;
  }

  virtual GenCollectorPolicy* as_generation_policy() { return this; }

  virtual void initialize_generations() = 0;

  virtual void initialize_all() {
    initialize_flags();
    initialize_size_info();
    initialize_generations();
  }

  HeapWord* mem_allocate_work(size_t size,
                              bool is_tlab,
                              bool* gc_overhead_limit_was_exceeded);

  HeapWord *satisfy_failed_allocation(size_t size, bool is_tlab);

  // Adaptive size policy
  virtual void initialize_size_policy(size_t init_eden_size,
                                      size_t init_promo_size,
                                      size_t init_survivor_size);
};

// All of HotSpot's current collectors are subtypes of this
// class. Currently, these collectors all use the same gen[0],
// but have different gen[1] types. If we add another subtype
// of CollectorPolicy, this class should be broken out into
// its own file.

class TwoGenerationCollectorPolicy : public GenCollectorPolicy {
 protected:
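  // Sizes of the old generation (gen 1), in bytes.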
  size_t _min_gen1_size;
  size_t _initial_gen1_size;
  size_t _max_gen1_size;

  void initialize_flags();
  void initialize_size_info();
  void initialize_generations()                { ShouldNotReachHere(); }

 public:
  // Accessors
  size_t min_gen1_size() { return _min_gen1_size; }
  void set_min_gen1_size(size_t v) { _min_gen1_size = v; }
  size_t initial_gen1_size() { return _initial_gen1_size; }
  void set_initial_gen1_size(size_t v) { _initial_gen1_size = v; }
  size_t max_gen1_size() { return _max_gen1_size; }
  void set_max_gen1_size(size_t v) { _max_gen1_size = v; }

  // Inherited methods
  TwoGenerationCollectorPolicy* as_two_generation_policy() { return this; }

  int number_of_generations()                  { return 2; }
  BarrierSet::Name barrier_set_name()          { return BarrierSet::CardTableModRef; }
  GenRemSet::Name rem_set_name()               { return GenRemSet::CardTable; }

  virtual CollectorPolicy::Name kind() {
    return CollectorPolicy::TwoGenerationCollectorPolicyKind;
  }

  // Returns true if gen0 sizes were adjusted.
  bool adjust_gen0_sizes(size_t* gen0_size_ptr, size_t* gen1_size_ptr,
                         const size_t heap_size, const size_t min_gen1_size);
};

class MarkSweepPolicy : public TwoGenerationCollectorPolicy {
 protected:
  void initialize_generations();

 public:
  MarkSweepPolicy();

  MarkSweepPolicy* as_mark_sweep_policy() { return this; }

  void initialize_gc_policy_counters();
};

#endif // SHARE_VM_MEMORY_COLLECTORPOLICY_HPP