src/share/vm/memory/collectorPolicy.hpp
  56 
  57 class GCPolicyCounters;
  58 class MarkSweepPolicy;
  59 
  60 class CollectorPolicy : public CHeapObj<mtGC> {
  61  protected:
  62   GCPolicyCounters* _gc_policy_counters;
  63 
  64   // Requires that the concrete subclass sets the alignment constraints
  65   // before calling.
  66   virtual void initialize_flags();
  67   virtual void initialize_size_info();
  68 
  69   size_t _initial_heap_byte_size;
  70   size_t _max_heap_byte_size;
  71   size_t _min_heap_byte_size;
  72 
  73   size_t _min_alignment;
  74   size_t _max_alignment;
  75 
  76   // The sizing of the heap are controlled by a sizing policy.
  77   AdaptiveSizePolicy* _size_policy;
  78 
  79   // Set to true when policy wants soft refs cleared.
  80   // Reset to false by gc after it clears all soft refs.
  81   bool _should_clear_all_soft_refs;

  82   // Set to true by the GC if the just-completed gc cleared all
  83   // softrefs.  This is set to true whenever a gc clears all softrefs, and
  84   // set to false each time gc returns to the mutator.  For example, in the
  85   // ParallelScavengeHeap case the latter would be done toward the end of
  86   // mem_allocate() where it returns op.result()
  87   bool _all_soft_refs_clear;
  88 
  89   CollectorPolicy() :
  90     _min_alignment(1),
  91     _max_alignment(1),
  92     _initial_heap_byte_size(0),
  93     _max_heap_byte_size(0),
  94     _min_heap_byte_size(0),
  95     _size_policy(NULL),
  96     _should_clear_all_soft_refs(false),
  97     _all_soft_refs_clear(false)
  98   {}
  99 
 100  public:
 101   // Return maximum heap alignment that may be imposed by the policy
 102   static size_t compute_max_alignment();
 103 
 104   void set_min_alignment(size_t align)         { _min_alignment = align; }
 105   size_t min_alignment()                       { return _min_alignment; }
 106   void set_max_alignment(size_t align)         { _max_alignment = align; }
 107   size_t max_alignment()                       { return _max_alignment; }
 108 
 109   size_t initial_heap_byte_size() { return _initial_heap_byte_size; }
 110   void set_initial_heap_byte_size(size_t v) { _initial_heap_byte_size = v; }
 111   size_t max_heap_byte_size()     { return _max_heap_byte_size; }
 112   void set_max_heap_byte_size(size_t v) { _max_heap_byte_size = v; }
 113   size_t min_heap_byte_size()     { return _min_heap_byte_size; }
 114   void set_min_heap_byte_size(size_t v) { _min_heap_byte_size = v; }
 115 
 116   enum Name {
 117     CollectorPolicyKind,
 118     TwoGenerationCollectorPolicyKind,
 119     ConcurrentMarkSweepPolicyKind,
 120     ASConcurrentMarkSweepPolicyKind,
 121     G1CollectorPolicyKind
 122   };
 123 
 124   AdaptiveSizePolicy* size_policy() { return _size_policy; }
 125   bool should_clear_all_soft_refs() { return _should_clear_all_soft_refs; }
 126   void set_should_clear_all_soft_refs(bool v) { _should_clear_all_soft_refs = v; }
 127   // Returns the current value of _should_clear_all_soft_refs.
 128   // _should_clear_all_soft_refs is set to false as a side effect.
 129   bool use_should_clear_all_soft_refs(bool v);
 130   bool all_soft_refs_clear() { return _all_soft_refs_clear; }
 131   void set_all_soft_refs_clear(bool v) { _all_soft_refs_clear = v; }
 132 
 133   // Called by the GC after Soft Refs have been cleared to indicate
 134   // that the request in _should_clear_all_soft_refs has been fulfilled.


 165 
  166   // This method controls how a collector satisfies a request
  167   // for a block of memory.  "gc_overhead_limit_was_exceeded" will
  168   // be set to true if the adaptive size policy determines that
  169   // an excessive amount of time is being spent doing collections
  170   // and that this caused a NULL to be returned.  If a NULL is not returned,
  171   // "gc_overhead_limit_was_exceeded" has an undefined meaning.
 172   virtual HeapWord* mem_allocate_work(size_t size,
 173                                       bool is_tlab,
 174                                       bool* gc_overhead_limit_was_exceeded) = 0;
 175 
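As a rough illustration of how the out-parameter is meant to be consumed, the sketch below shows a hypothetical caller (the helper name and the error-handling comment are assumptions, not code from this file): when mem_allocate_work() returns NULL with the flag set, the caller knows the failure came from the GC overhead limit rather than from a simple lack of space.

HeapWord* allocate_with_policy(CollectorPolicy* policy,
                               size_t word_size, bool is_tlab) {
  bool gc_overhead_limit_was_exceeded = false;
  HeapWord* result = policy->mem_allocate_work(word_size, is_tlab,
                                               &gc_overhead_limit_was_exceeded);
  if (result == NULL && gc_overhead_limit_was_exceeded) {
    // The adaptive size policy judged that too much time is going into
    // collections; a real caller would typically surface this as a
    // "GC overhead limit exceeded" OutOfMemoryError.
  }
  return result;
}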
 176   // This method controls how a collector handles one or more
 177   // of its generations being fully allocated.
 178   virtual HeapWord *satisfy_failed_allocation(size_t size, bool is_tlab) = 0;
 179   // This method controls how a collector handles a metadata allocation
 180   // failure.
 181   virtual MetaWord* satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
 182                                                        size_t size,
 183                                                        Metaspace::MetadataType mdtype);
 184 
 185   // Performace Counter support
 186   GCPolicyCounters* counters()     { return _gc_policy_counters; }
 187 
  188   // Create the jstat counters for the GC policy.  By default, policies
  189   // don't have associated counters, and we complain if this is invoked.
 190   virtual void initialize_gc_policy_counters() {
 191     ShouldNotReachHere();
 192   }
 193 
 194   virtual CollectorPolicy::Name kind() {
 195     return CollectorPolicy::CollectorPolicyKind;
 196   }
 197 
  198   // Returns true if a collector has an eden space with a soft end.
 199   virtual bool has_soft_ended_eden() {
 200     return false;
 201   }
 202 
 203 };
 204 
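The two soft-reference flags above form a small handshake between the policy and the collector. The following sketch (hypothetical code, not part of this change) shows the intended lifecycle using only the accessors declared in this class:

void collection_cycle_sketch(CollectorPolicy* policy) {
  // Read the request that was posted before the collection started.
  bool clear_all = policy->should_clear_all_soft_refs();
  // ... perform the collection, clearing every soft ref if clear_all is true ...
  if (clear_all) {
    policy->set_should_clear_all_soft_refs(false);  // request fulfilled
    policy->set_all_soft_refs_clear(true);          // this gc cleared them all
  }
  // _all_soft_refs_clear is reset to false once the gc returns to the mutator,
  // e.g. near the end of ParallelScavengeHeap::mem_allocate().
}

The read-and-reset step can also be done through use_should_clear_all_soft_refs(), which returns the current request and clears it as a side effect, as its comment above describes.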
 205 class ClearedAllSoftRefs : public StackObj {


 220 
 221 class GenCollectorPolicy : public CollectorPolicy {
 222  protected:
 223   size_t _min_gen0_size;
 224   size_t _initial_gen0_size;
 225   size_t _max_gen0_size;
 226 
 227   GenerationSpec **_generations;
 228 
 229   // Return true if an allocation should be attempted in the older
 230   // generation if it fails in the younger generation.  Return
  231   // false otherwise.
 232   virtual bool should_try_older_generation_allocation(size_t word_size) const;
 233 
 234   void initialize_flags();
 235   void initialize_size_info();
 236 
 237   // Try to allocate space by expanding the heap.
 238   virtual HeapWord* expand_heap_and_allocate(size_t size, bool is_tlab);
 239 
 240  // Scale the base_size by NewRation according to
 241  //     result = base_size / (NewRatio + 1)
 242  // and align by min_alignment()
 243  size_t scale_by_NewRatio_aligned(size_t base_size);
 244 
 245  // Bound the value by the given maximum minus the
 246  // min_alignment.
 247  size_t bound_minus_alignment(size_t desired_size, size_t maximum_size);
 248 
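The two helpers above are only declared here; a minimal sketch of the arithmetic they describe is shown below (standalone illustration with assumed names, not the HotSpot implementation): gen0 receives 1/(NewRatio + 1) of the base size, aligned down to min_alignment(), and a desired size is capped at the maximum minus one min_alignment().

// Illustration only: result = base_size / (NewRatio + 1), aligned down.
static size_t scale_by_new_ratio_sketch(size_t base_size, size_t new_ratio,
                                        size_t min_alignment) {
  size_t scaled = base_size / (new_ratio + 1);
  return scaled - (scaled % min_alignment);   // align down to min_alignment
}

// Illustration only: bound desired_size by maximum_size - min_alignment.
static size_t bound_minus_alignment_sketch(size_t desired_size, size_t maximum_size,
                                           size_t min_alignment) {
  size_t bound = maximum_size - min_alignment;
  return desired_size < bound ? desired_size : bound;
}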
 249  public:
 250   // Accessors
 251   size_t min_gen0_size() { return _min_gen0_size; }
 252   void set_min_gen0_size(size_t v) { _min_gen0_size = v; }
 253   size_t initial_gen0_size() { return _initial_gen0_size; }
 254   void set_initial_gen0_size(size_t v) { _initial_gen0_size = v; }
 255   size_t max_gen0_size() { return _max_gen0_size; }
 256   void set_max_gen0_size(size_t v) { _max_gen0_size = v; }
 257 
 258   virtual int number_of_generations() = 0;
 259 
 260   virtual GenerationSpec **generations()       {
 261     assert(_generations != NULL, "Sanity check");
 262     return _generations;
 263   }
 264 
 265   virtual GenCollectorPolicy* as_generation_policy() { return this; }
 266 
 267   virtual void initialize_generations() = 0;
 268 
 269   virtual void initialize_all() {
 270     initialize_flags();
 271     initialize_size_info();
 272     initialize_generations();
 273   }
 274 
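Since initialize_generations() is pure virtual, a concrete policy supplies it and relies on the ordering that initialize_all() establishes: flags first, then size info, then generations. A hypothetical subclass (illustration only; it stays abstract because it does not implement CollectorPolicy's allocation methods) might look like:

class ExampleGenPolicy : public GenCollectorPolicy {
 public:
  virtual int number_of_generations() { return 2; }
  virtual void initialize_generations() {
    // Called last by initialize_all(), so initialize_flags() and
    // initialize_size_info() have already run and the gen0 size
    // accessors (min/initial/max_gen0_size()) hold meaningful values.
  }
};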
 275   HeapWord* mem_allocate_work(size_t size,
 276                               bool is_tlab,


 286 
 287 // All of hotspot's current collectors are subtypes of this
 288 // class. Currently, these collectors all use the same gen[0],
 289 // but have different gen[1] types. If we add another subtype
 290 // of CollectorPolicy, this class should be broken out into
 291 // its own file.
 292 
 293 class TwoGenerationCollectorPolicy : public GenCollectorPolicy {
 294  protected:
 295   size_t _min_gen1_size;
 296   size_t _initial_gen1_size;
 297   size_t _max_gen1_size;
 298 
 299   void initialize_flags();
 300   void initialize_size_info();
 301   void initialize_generations()                { ShouldNotReachHere(); }
 302 
 303  public:
 304   // Accessors
 305   size_t min_gen1_size() { return _min_gen1_size; }
 306   void set_min_gen1_size(size_t v) { _min_gen1_size = v; }
 307   size_t initial_gen1_size() { return _initial_gen1_size; }
 308   void set_initial_gen1_size(size_t v) { _initial_gen1_size = v; }
 309   size_t max_gen1_size() { return _max_gen1_size; }
 310   void set_max_gen1_size(size_t v) { _max_gen1_size = v; }
 311 
 312   // Inherited methods
 313   TwoGenerationCollectorPolicy* as_two_generation_policy() { return this; }
 314 
 315   int number_of_generations()                  { return 2; }
 316   BarrierSet::Name barrier_set_name()          { return BarrierSet::CardTableModRef; }
 317   GenRemSet::Name rem_set_name()               { return GenRemSet::CardTable; }
 318 
 319   virtual CollectorPolicy::Name kind() {
 320     return CollectorPolicy::TwoGenerationCollectorPolicyKind;
 321   }
 322 
  323   // Returns true if gen0 sizes were adjusted
 324   bool adjust_gen0_sizes(size_t* gen0_size_ptr, size_t* gen1_size_ptr,
 325                          const size_t heap_size, const size_t min_gen1_size);
 326 };
 327 
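adjust_gen0_sizes() is the piece that keeps the two generation sizes consistent with the overall heap bound. A hypothetical sizing pass (names and flow assumed for illustration, not code from this changeset) might use it like this:

void size_generations_sketch(TwoGenerationCollectorPolicy* policy,
                             size_t heap_size,
                             size_t desired_gen0, size_t desired_gen1) {
  size_t gen0_size = desired_gen0;
  size_t gen1_size = desired_gen1;
  if (policy->adjust_gen0_sizes(&gen0_size, &gen1_size,
                                heap_size, policy->min_gen1_size())) {
    // The desired sizes did not fit: gen0_size (and possibly gen1_size)
    // have been adjusted so both generations fit within heap_size while
    // gen1 keeps at least min_gen1_size.
  }
}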
 328 class MarkSweepPolicy : public TwoGenerationCollectorPolicy {
 329  protected:
 330   void initialize_generations();


  56 
  57 class GCPolicyCounters;
  58 class MarkSweepPolicy;
  59 
  60 class CollectorPolicy : public CHeapObj<mtGC> {
  61  protected:
  62   GCPolicyCounters* _gc_policy_counters;
  63 
  64   // Requires that the concrete subclass sets the alignment constraints
  65   // before calling.
  66   virtual void initialize_flags();
  67   virtual void initialize_size_info();
  68 
  69   size_t _initial_heap_byte_size;
  70   size_t _max_heap_byte_size;
  71   size_t _min_heap_byte_size;
  72 
  73   size_t _min_alignment;
  74   size_t _max_alignment;
  75 
  76   // The sizing of the heap is controlled by a sizing policy.
  77   AdaptiveSizePolicy* _size_policy;
  78 
  79   // Set to true when policy wants soft refs cleared.
  80   // Reset to false by gc after it clears all soft refs.
  81   bool _should_clear_all_soft_refs;
  82 
  83   // Set to true by the GC if the just-completed gc cleared all
  84   // softrefs.  This is set to true whenever a gc clears all softrefs, and
  85   // set to false each time gc returns to the mutator.  For example, in the
  86   // ParallelScavengeHeap case the latter would be done toward the end of
  87   // mem_allocate() where it returns op.result()
  88   bool _all_soft_refs_clear;
  89 
  90   CollectorPolicy() :
  91     _min_alignment(1),
  92     _max_alignment(1),
  93     _initial_heap_byte_size(0),
  94     _max_heap_byte_size(0),
  95     _min_heap_byte_size(0),
  96     _size_policy(NULL),
  97     _should_clear_all_soft_refs(false),
  98     _all_soft_refs_clear(false)
  99   {}
 100 
 101  public:
 102   // Return maximum heap alignment that may be imposed by the policy
 103   static size_t compute_max_alignment();
 104 

 105   size_t min_alignment()                       { return _min_alignment; }

 106   size_t max_alignment()                       { return _max_alignment; }
 107 
 108   size_t initial_heap_byte_size() { return _initial_heap_byte_size; }

 109   size_t max_heap_byte_size()     { return _max_heap_byte_size; }

 110   size_t min_heap_byte_size()     { return _min_heap_byte_size; }

 111 
 112   enum Name {
 113     CollectorPolicyKind,
 114     TwoGenerationCollectorPolicyKind,
 115     ConcurrentMarkSweepPolicyKind,
 116     ASConcurrentMarkSweepPolicyKind,
 117     G1CollectorPolicyKind
 118   };
 119 
 120   AdaptiveSizePolicy* size_policy() { return _size_policy; }
 121   bool should_clear_all_soft_refs() { return _should_clear_all_soft_refs; }
 122   void set_should_clear_all_soft_refs(bool v) { _should_clear_all_soft_refs = v; }
 123   // Returns the current value of _should_clear_all_soft_refs.
 124   // _should_clear_all_soft_refs is set to false as a side effect.
 125   bool use_should_clear_all_soft_refs(bool v);
 126   bool all_soft_refs_clear() { return _all_soft_refs_clear; }
 127   void set_all_soft_refs_clear(bool v) { _all_soft_refs_clear = v; }
 128 
 129   // Called by the GC after Soft Refs have been cleared to indicate
 130   // that the request in _should_clear_all_soft_refs has been fulfilled.


 161 
  162   // This method controls how a collector satisfies a request
  163   // for a block of memory.  "gc_overhead_limit_was_exceeded" will
  164   // be set to true if the adaptive size policy determines that
  165   // an excessive amount of time is being spent doing collections
  166   // and that this caused a NULL to be returned.  If a NULL is not returned,
  167   // "gc_overhead_limit_was_exceeded" has an undefined meaning.
 168   virtual HeapWord* mem_allocate_work(size_t size,
 169                                       bool is_tlab,
 170                                       bool* gc_overhead_limit_was_exceeded) = 0;
 171 
 172   // This method controls how a collector handles one or more
 173   // of its generations being fully allocated.
 174   virtual HeapWord *satisfy_failed_allocation(size_t size, bool is_tlab) = 0;
 175   // This method controls how a collector handles a metadata allocation
 176   // failure.
 177   virtual MetaWord* satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
 178                                                        size_t size,
 179                                                        Metaspace::MetadataType mdtype);
 180 
 181   // Performance Counter support
 182   GCPolicyCounters* counters()     { return _gc_policy_counters; }
 183 
  184   // Create the jstat counters for the GC policy.  By default, policies
  185   // don't have associated counters, and we complain if this is invoked.
 186   virtual void initialize_gc_policy_counters() {
 187     ShouldNotReachHere();
 188   }
 189 
 190   virtual CollectorPolicy::Name kind() {
 191     return CollectorPolicy::CollectorPolicyKind;
 192   }
 193 
  194   // Returns true if a collector has an eden space with a soft end.
 195   virtual bool has_soft_ended_eden() {
 196     return false;
 197   }
 198 
 199 };
 200 
 201 class ClearedAllSoftRefs : public StackObj {


 216 
 217 class GenCollectorPolicy : public CollectorPolicy {
 218  protected:
 219   size_t _min_gen0_size;
 220   size_t _initial_gen0_size;
 221   size_t _max_gen0_size;
 222 
 223   GenerationSpec **_generations;
 224 
 225   // Return true if an allocation should be attempted in the older
 226   // generation if it fails in the younger generation.  Return
  227   // false otherwise.
 228   virtual bool should_try_older_generation_allocation(size_t word_size) const;
 229 
 230   void initialize_flags();
 231   void initialize_size_info();
 232 
 233   // Try to allocate space by expanding the heap.
 234   virtual HeapWord* expand_heap_and_allocate(size_t size, bool is_tlab);
 235 
 236  // Scale the base_size by NewRatio according to
 237  //     result = base_size / (NewRatio + 1)
 238  // and align by min_alignment()
 239  size_t scale_by_NewRatio_aligned(size_t base_size);
 240 
 241  // Bound the value by the given maximum minus the
 242  // min_alignment.
 243  size_t bound_minus_alignment(size_t desired_size, size_t maximum_size);
 244 
 245  public:
 246   // Accessors
 247   size_t min_gen0_size()     { return _min_gen0_size; }

 248   size_t initial_gen0_size() { return _initial_gen0_size; }

 249   size_t max_gen0_size()     { return _max_gen0_size; }

 250 
 251   virtual int number_of_generations() = 0;
 252 
 253   virtual GenerationSpec **generations() {
 254     assert(_generations != NULL, "Sanity check");
 255     return _generations;
 256   }
 257 
 258   virtual GenCollectorPolicy* as_generation_policy() { return this; }
 259 
 260   virtual void initialize_generations() = 0;
 261 
 262   virtual void initialize_all() {
 263     initialize_flags();
 264     initialize_size_info();
 265     initialize_generations();
 266   }
 267 
 268   HeapWord* mem_allocate_work(size_t size,
 269                               bool is_tlab,


 279 
 280 // All of hotspot's current collectors are subtypes of this
 281 // class. Currently, these collectors all use the same gen[0],
 282 // but have different gen[1] types. If we add another subtype
 283 // of CollectorPolicy, this class should be broken out into
 284 // its own file.
 285 
 286 class TwoGenerationCollectorPolicy : public GenCollectorPolicy {
 287  protected:
 288   size_t _min_gen1_size;
 289   size_t _initial_gen1_size;
 290   size_t _max_gen1_size;
 291 
 292   void initialize_flags();
 293   void initialize_size_info();
 294   void initialize_generations()                { ShouldNotReachHere(); }
 295 
 296  public:
 297   // Accessors
 298   size_t min_gen1_size()     { return _min_gen1_size; }

 299   size_t initial_gen1_size() { return _initial_gen1_size; }

 300   size_t max_gen1_size()     { return _max_gen1_size; }

 301 
 302   // Inherited methods
 303   TwoGenerationCollectorPolicy* as_two_generation_policy() { return this; }
 304 
 305   int number_of_generations()          { return 2; }
 306   BarrierSet::Name barrier_set_name()  { return BarrierSet::CardTableModRef; }
 307   GenRemSet::Name rem_set_name()       { return GenRemSet::CardTable; }
 308 
 309   virtual CollectorPolicy::Name kind() {
 310     return CollectorPolicy::TwoGenerationCollectorPolicyKind;
 311   }
 312 
  313   // Returns true if gen0 sizes were adjusted
 314   bool adjust_gen0_sizes(size_t* gen0_size_ptr, size_t* gen1_size_ptr,
 315                          const size_t heap_size, const size_t min_gen1_size);
 316 };
 317 
 318 class MarkSweepPolicy : public TwoGenerationCollectorPolicy {
 319  protected:
 320   void initialize_generations();