src/share/vm/memory/collectorPolicy.hpp

src/share/vm/memory/collectorPolicy.hpp (old version)

  44 // are added, it is expected that we will come across further
  45 // behavior that requires global attention. The correct place
  46 // to deal with those issues is this class.
  47 
  48 // Forward declarations.
  49 class GenCollectorPolicy;
  50 class TwoGenerationCollectorPolicy;
  51 class AdaptiveSizePolicy;
  52 #if INCLUDE_ALL_GCS
  53 class ConcurrentMarkSweepPolicy;
  54 class G1CollectorPolicy;
  55 #endif // INCLUDE_ALL_GCS
  56 
  57 class GCPolicyCounters;
  58 class MarkSweepPolicy;
  59 
  60 class CollectorPolicy : public CHeapObj<mtGC> {
  61  protected:
  62   GCPolicyCounters* _gc_policy_counters;
  63 
  64   // Requires that the concrete subclass sets the alignment constraints
  65   // before calling.
  66   virtual void initialize_flags();
  67   virtual void initialize_size_info();
  68 
  69   size_t _initial_heap_byte_size;
  70   size_t _max_heap_byte_size;
  71   size_t _min_heap_byte_size;
  72 
  73   size_t _min_alignment;
  74   size_t _max_alignment;
  75 
  76   // The sizing of the heap is controlled by a sizing policy.
  77   AdaptiveSizePolicy* _size_policy;
  78 
  79   // Set to true when policy wants soft refs cleared.
  80   // Reset to false by gc after it clears all soft refs.
  81   bool _should_clear_all_soft_refs;
  82 
  83   // Set to true by the GC if the just-completed gc cleared all
  84   // softrefs.  This is set to true whenever a gc clears all softrefs, and
  85   // set to false each time gc returns to the mutator.  For example, in the
  86   // ParallelScavengeHeap case the latter would be done toward the end of
  87   // mem_allocate() where it returns op.result()
  88   bool _all_soft_refs_clear;
  89 
  90   CollectorPolicy() :
  91     _min_alignment(1),
  92     _max_alignment(1),
  93     _initial_heap_byte_size(0),
  94     _max_heap_byte_size(0),
  95     _min_heap_byte_size(0),
  96     _size_policy(NULL),
  97     _should_clear_all_soft_refs(false),
  98     _all_soft_refs_clear(false)
  99   {}
 100 
 101  public:
 102   // Return maximum heap alignment that may be imposed by the policy
 103   static size_t compute_max_alignment();
 104 
 105   size_t min_alignment()          { return _min_alignment; }
 106   size_t max_alignment()          { return _max_alignment; }
 107 
 108   size_t initial_heap_byte_size() { return _initial_heap_byte_size; }
 109   size_t max_heap_byte_size()     { return _max_heap_byte_size; }
 110   size_t min_heap_byte_size()     { return _min_heap_byte_size; }
 111 
 112   enum Name {
 113     CollectorPolicyKind,
 114     TwoGenerationCollectorPolicyKind,
 115     ConcurrentMarkSweepPolicyKind,
 116     ASConcurrentMarkSweepPolicyKind,
 117     G1CollectorPolicyKind
 118   };
 119 
 120   AdaptiveSizePolicy* size_policy() { return _size_policy; }
 121   bool should_clear_all_soft_refs() { return _should_clear_all_soft_refs; }
 122   void set_should_clear_all_soft_refs(bool v) { _should_clear_all_soft_refs = v; }
 123   // Returns the current value of _should_clear_all_soft_refs.
 124   // _should_clear_all_soft_refs is set to false as a side effect.
 125   bool use_should_clear_all_soft_refs(bool v);
 126   bool all_soft_refs_clear() { return _all_soft_refs_clear; }
 178                                                        Metaspace::MetadataType mdtype);
 179 
 180   // Performance Counter support
 181   GCPolicyCounters* counters()     { return _gc_policy_counters; }
 182 
 183   // Create the jstat counters for the GC policy.  By default, policies
 184   // don't have associated counters, and we complain if this is invoked.
 185   virtual void initialize_gc_policy_counters() {
 186     ShouldNotReachHere();
 187   }
 188 
 189   virtual CollectorPolicy::Name kind() {
 190     return CollectorPolicy::CollectorPolicyKind;
 191   }
 192 
 193   // Returns true if a collector has an eden space with a soft end.
 194   virtual bool has_soft_ended_eden() {
 195     return false;
 196   }
 197   }
 198 };
 199 
 200 class ClearedAllSoftRefs : public StackObj {
 201   bool _clear_all_soft_refs;
 202   CollectorPolicy* _collector_policy;
 203  public:
 204   ClearedAllSoftRefs(bool clear_all_soft_refs,
 205                      CollectorPolicy* collector_policy) :
 206     _clear_all_soft_refs(clear_all_soft_refs),
 207     _collector_policy(collector_policy) {}
 208 
 209   ~ClearedAllSoftRefs() {
 210     if (_clear_all_soft_refs) {
 211       _collector_policy->cleared_all_soft_refs();
 212     }
 213   }
 214 };
 215 
 216 class GenCollectorPolicy : public CollectorPolicy {
 217  protected:
 218   size_t _min_gen0_size;
 219   size_t _initial_gen0_size;
 220   size_t _max_gen0_size;
 221 
 222   GenerationSpec **_generations;
 223 
 224   // Return true if an allocation should be attempted in the older
 225   // generation if it fails in the younger generation.  Return
 226   // false, otherwise.
 227   virtual bool should_try_older_generation_allocation(size_t word_size) const;
 228 
 229   void initialize_flags();
 230   void initialize_size_info();
 231 
 232   // Try to allocate space by expanding the heap.
 233   virtual HeapWord* expand_heap_and_allocate(size_t size, bool is_tlab);
 234 
 235  // Scale the base_size by NewRatio according to
 236  //     result = base_size / (NewRatio + 1)
 237  // and align by min_alignment()
 238  size_t scale_by_NewRatio_aligned(size_t base_size);
 239 
 240  // Bound the value by the given maximum minus the
 241  // min_alignment.
 242  size_t bound_minus_alignment(size_t desired_size, size_t maximum_size);
 243 
 244  public:
 245   // Accessors
 246   size_t min_gen0_size()     { return _min_gen0_size; }
 247   size_t initial_gen0_size() { return _initial_gen0_size; }
 248   size_t max_gen0_size()     { return _max_gen0_size; }
 249 
 250   virtual int number_of_generations() = 0;
 251 
 252   virtual GenerationSpec **generations() {
 253     assert(_generations != NULL, "Sanity check");
 254     return _generations;
 255   }
 256 
 257   virtual GenCollectorPolicy* as_generation_policy() { return this; }
 258 
 259   virtual void initialize_generations() = 0;
 260 
 261   virtual void initialize_all() {
 262     initialize_flags();
 263     initialize_size_info();
 264     initialize_generations();
 265   }
 266 
 267   HeapWord* mem_allocate_work(size_t size,
 268                               bool is_tlab,
 269                               bool* gc_overhead_limit_was_exceeded);
 270 
 271   HeapWord *satisfy_failed_allocation(size_t size, bool is_tlab);
 272 
 273   // Adaptive size policy
 274   virtual void initialize_size_policy(size_t init_eden_size,
 275                                       size_t init_promo_size,
 276                                       size_t init_survivor_size);
 277 
 278   // The alignment used for eden and survivors within the young gen
 279   // and for the boundary between the young gen and the old gen.
 280   static size_t intra_heap_alignment() {
 281     return 64 * K * HeapWordSize;
 282   }
 283 };
 284 
 285 // All of hotspot's current collectors are subtypes of this
 286 // class. Currently, these collectors all use the same gen[0],
 287 // but have different gen[1] types. If we add another subtype
 288 // of CollectorPolicy, this class should be broken out into
 289 // its own file.
 290 
 291 class TwoGenerationCollectorPolicy : public GenCollectorPolicy {
 292  protected:
 293   size_t _min_gen1_size;
 294   size_t _initial_gen1_size;
 295   size_t _max_gen1_size;
 296 
 297   void initialize_flags();
 298   void initialize_size_info();
 299   void initialize_generations()                { ShouldNotReachHere(); }
 300 
 301  public:
 302   // Accessors
 303   size_t min_gen1_size()     { return _min_gen1_size; }
 304   size_t initial_gen1_size() { return _initial_gen1_size; }
 305   size_t max_gen1_size()     { return _max_gen1_size; }
 306 
 307   // Inherited methods
 308   TwoGenerationCollectorPolicy* as_two_generation_policy() { return this; }
 309 
 310   int number_of_generations()          { return 2; }
 311   BarrierSet::Name barrier_set_name()  { return BarrierSet::CardTableModRef; }
 312 
 313   virtual CollectorPolicy::Name kind() {
 314     return CollectorPolicy::TwoGenerationCollectorPolicyKind;
 315   }
 316 
 317   // Returns true if gen0 sizes were adjusted
 318   bool adjust_gen0_sizes(size_t* gen0_size_ptr, size_t* gen1_size_ptr,
 319                          const size_t heap_size, const size_t min_gen1_size);
 320 };
 321 
 322 class MarkSweepPolicy : public TwoGenerationCollectorPolicy {
 323  protected:
 324   void initialize_generations();
 325 
 326  public:
 327   MarkSweepPolicy();
 328 
 329   MarkSweepPolicy* as_mark_sweep_policy() { return this; }
 330 
 331   void initialize_gc_policy_counters();
 332 };
 333 
 334 #endif // SHARE_VM_MEMORY_COLLECTORPOLICY_HPP

src/share/vm/memory/collectorPolicy.hpp (new version)

  44 // are added, it is expected that we will come across further
  45 // behavior that requires global attention. The correct place
  46 // to deal with those issues is this class.
  47 
  48 // Forward declarations.
  49 class GenCollectorPolicy;
  50 class TwoGenerationCollectorPolicy;
  51 class AdaptiveSizePolicy;
  52 #if INCLUDE_ALL_GCS
  53 class ConcurrentMarkSweepPolicy;
  54 class G1CollectorPolicy;
  55 #endif // INCLUDE_ALL_GCS
  56 
  57 class GCPolicyCounters;
  58 class MarkSweepPolicy;
  59 
  60 class CollectorPolicy : public CHeapObj<mtGC> {
  61  protected:
  62   GCPolicyCounters* _gc_policy_counters;
  63 
  64   virtual void initialize_alignments();
  65   virtual void initialize_flags();
  66   virtual void initialize_size_info();
  67 
  68   virtual void assert_flags();
  69   virtual void assert_size_info();
  70 
  71   size_t _initial_heap_byte_size;
  72   size_t _max_heap_byte_size;
  73   size_t _min_heap_byte_size;
  74 
  75   size_t _space_alignment;
  76   size_t _heap_alignment;
  77 
  78   // Needed to remember whether MaxHeapSize was set on the command line,
  79   // because the flag value is later aligned and otherwise adjusted by ergonomics
  80   bool _max_heap_size_cmdline;
  81 
  82   // The sizing of the heap is controlled by a sizing policy.
  83   AdaptiveSizePolicy* _size_policy;
  84 
  85   // Set to true when policy wants soft refs cleared.
  86   // Reset to false by gc after it clears all soft refs.
  87   bool _should_clear_all_soft_refs;
  88 
  89   // Set to true by the GC if the just-completed gc cleared all
  90   // softrefs.  This is set to true whenever a gc clears all softrefs, and
  91   // set to false each time gc returns to the mutator.  For example, in the
  92   // ParallelScavengeHeap case the latter would be done toward the end of
  93   // mem_allocate() where it returns op.result()
  94   bool _all_soft_refs_clear;
  95 
  96   CollectorPolicy();
  97 
  98  public:
  99   virtual void initialize_all() {
 100     initialize_alignments();
 101     initialize_flags();
 102     initialize_size_info();
 103   }
 104 
 105   // Return maximum heap alignment that may be imposed by the policy
 106   static size_t compute_heap_alignment();
 107 
 108   size_t space_alignment()        { return _space_alignment; }
 109   size_t heap_alignment()         { return _heap_alignment; }
 110 
 111   size_t initial_heap_byte_size() { return _initial_heap_byte_size; }
 112   size_t max_heap_byte_size()     { return _max_heap_byte_size; }
 113   size_t min_heap_byte_size()     { return _min_heap_byte_size; }
 114 
 115   enum Name {
 116     CollectorPolicyKind,
 117     TwoGenerationCollectorPolicyKind,
 118     ConcurrentMarkSweepPolicyKind,
 119     ASConcurrentMarkSweepPolicyKind,
 120     G1CollectorPolicyKind
 121   };
 122 
 123   AdaptiveSizePolicy* size_policy() { return _size_policy; }
 124   bool should_clear_all_soft_refs() { return _should_clear_all_soft_refs; }
 125   void set_should_clear_all_soft_refs(bool v) { _should_clear_all_soft_refs = v; }
 126   // Returns the current value of _should_clear_all_soft_refs.
 127   // _should_clear_all_soft_refs is set to false as a side effect.
 128   bool use_should_clear_all_soft_refs(bool v);
 129   bool all_soft_refs_clear() { return _all_soft_refs_clear; }
 181                                                        Metaspace::MetadataType mdtype);
 182 
 183   // Performance Counter support
 184   GCPolicyCounters* counters()     { return _gc_policy_counters; }
 185 
 186   // Create the jstat counters for the GC policy.  By default, policies
 187   // don't have associated counters, and we complain if this is invoked.
 188   virtual void initialize_gc_policy_counters() {
 189     ShouldNotReachHere();
 190   }
 191 
 192   virtual CollectorPolicy::Name kind() {
 193     return CollectorPolicy::CollectorPolicyKind;
 194   }
 195 
 196   // Returns true if a collector has an eden space with a soft end.
 197   virtual bool has_soft_ended_eden() {
 198     return false;
 199   }
 200 
 201   // Apply any updates to global flags that are required as a result of heap
 202   // initialization changes
 203   virtual void post_heap_initialize() = 0;
 204 };
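The initialize_all() sequence above is order-sensitive: initialize_alignments() must run before initialize_flags() and initialize_size_info(), because the flag and size calculations depend on the alignment values. A minimal sketch of the hook a concrete policy is expected to supply, assuming HotSpot's usual globals (K, etc.); the subclass name and the chosen space alignment are invented for illustration only:

    class HypotheticalPolicy : public CollectorPolicy {
     protected:
      // Supply the alignments first; CollectorPolicy::initialize_all() then
      // runs initialize_flags() and initialize_size_info() against them.
      virtual void initialize_alignments() {
        _space_alignment = 4 * K;                   // assumed: one small page
        _heap_alignment  = compute_heap_alignment();
      }
     public:
      // Nothing extra to push back into global flags in this sketch.
      virtual void post_heap_initialize() { }
    };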
 205 
 206 class ClearedAllSoftRefs : public StackObj {
 207   bool _clear_all_soft_refs;
 208   CollectorPolicy* _collector_policy;
 209  public:
 210   ClearedAllSoftRefs(bool clear_all_soft_refs,
 211                      CollectorPolicy* collector_policy) :
 212     _clear_all_soft_refs(clear_all_soft_refs),
 213     _collector_policy(collector_policy) {}
 214 
 215   ~ClearedAllSoftRefs() {
 216     if (_clear_all_soft_refs) {
 217       _collector_policy->cleared_all_soft_refs();
 218     }
 219   }
 220 };
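ClearedAllSoftRefs is a small RAII helper: its destructor notifies the policy via cleared_all_soft_refs() only when the scope that performed the collection really did clear soft references. A purely illustrative usage sketch; the function name and structure are invented, only the CollectorPolicy and ClearedAllSoftRefs calls come from this header:

    static void hypothetical_full_collection(CollectorPolicy* policy,
                                             bool caller_wants_clear) {
      // Honor either the caller's request or a pending request from the policy.
      bool clear_all_soft_refs = caller_wants_clear ||
                                 policy->should_clear_all_soft_refs();
      ClearedAllSoftRefs casr(clear_all_soft_refs, policy);
      // ... perform the collection, clearing SoftReferences when requested ...
    } // casr's destructor now reports back to the policy if they were cleared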
 221 
 222 class GenCollectorPolicy : public CollectorPolicy {
 223  protected:
 224   size_t _min_gen0_size;
 225   size_t _initial_gen0_size;
 226   size_t _max_gen0_size;
 227 
 228   // _gen_alignment and _space_alignment will have the same value most of the
 229   // time. When using large pages they can differ.
 230   size_t _gen_alignment;
 231 
 232   GenerationSpec **_generations;
 233 
 234   // Return true if an allocation should be attempted in the older
 235   // generation if it fails in the younger generation.  Return
 236   // false, otherwise.
 237   virtual bool should_try_older_generation_allocation(size_t word_size) const;
 238 
 239   void initialize_alignments();
 240   void initialize_flags();
 241   void initialize_size_info();
 242 
 243   void assert_flags();
 244   void assert_size_info();
 245 
 246   // Try to allocate space by expanding the heap.
 247   virtual HeapWord* expand_heap_and_allocate(size_t size, bool is_tlab);
 248 
 249   // Compute max heap alignment
 250   size_t compute_max_alignment();
 251 
 252  // Scale the base_size by NewRatio according to
 253  //     result = base_size / (NewRatio + 1)
 254  // and align the result to the generation alignment (_gen_alignment)
 255  size_t scale_by_NewRatio_aligned(size_t base_size);
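      //
      // Purely illustrative arithmetic (values assumed, not taken from this
      // change): with base_size = 96M and the default NewRatio of 2, the
      // scaled size is 96M / 3 = 32M, which a 512K gen alignment leaves
      // unchanged since 32M is already 512K-aligned.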
 256 
 257  // Bound the value by the given maximum minus the generation alignment
 258  size_t bound_minus_alignment(size_t desired_size, size_t maximum_size);
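      // (Sketch of the intent, not the exact body:
      //  result = MIN(desired_size, maximum_size - alignment).)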
 259 
 260  public:
 261   GenCollectorPolicy() : CollectorPolicy(), _min_gen0_size(0), _initial_gen0_size(0),
 262     _max_gen0_size(0), _generations(NULL) {}
 263 
 264   // Accessors
 265   size_t min_gen0_size()     { return _min_gen0_size; }
 266   size_t initial_gen0_size() { return _initial_gen0_size; }
 267   size_t max_gen0_size()     { return _max_gen0_size; }
 268   size_t gen_alignment()     { return _gen_alignment; }
 269 
 270   virtual int number_of_generations() = 0;
 271 
 272   virtual GenerationSpec **generations() {
 273     assert(_generations != NULL, "Sanity check");
 274     return _generations;
 275   }
 276 
 277   virtual GenCollectorPolicy* as_generation_policy() { return this; }
 278 
 279   virtual void initialize_generations() { }
 280 
 281   virtual void initialize_all() {
 282     CollectorPolicy::initialize_all();
 283     initialize_generations();
 284   }
 285 
 286   size_t young_gen_size_lower_bound();
 287 
 288   HeapWord* mem_allocate_work(size_t size,
 289                               bool is_tlab,
 290                               bool* gc_overhead_limit_was_exceeded);
 291 
 292   HeapWord *satisfy_failed_allocation(size_t size, bool is_tlab);
 293 
 294   // Adaptive size policy
 295   virtual void initialize_size_policy(size_t init_eden_size,
 296                                       size_t init_promo_size,
 297                                       size_t init_survivor_size);
 298 
 299   virtual void post_heap_initialize() {
 300     assert(_max_gen0_size == MaxNewSize, "Should be taken care of by initialize_size_info");
 301   }
 302 
 303   // The alignment used for the boundary between the young gen and the old gen
 304   static size_t default_gen_alignment() {
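         // Illustrative value: with K == 1024 and an 8-byte HeapWordSize
         // (64-bit VM) this is 512K; with a 4-byte HeapWordSize (32-bit VM)
         // it is 256K.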
 305     return 64 * K * HeapWordSize;
 306   }
 307 };
 308 
 309 // All of hotspot's current collectors are subtypes of this
 310 // class. Currently, these collectors all use the same gen[0],
 311 // but have different gen[1] types. If we add another subtype
 312 // of CollectorPolicy, this class should be broken out into
 313 // its own file.
 314 
 315 class TwoGenerationCollectorPolicy : public GenCollectorPolicy {
 316  protected:
 317   size_t _min_gen1_size;
 318   size_t _initial_gen1_size;
 319   size_t _max_gen1_size;
 320 
 321   void initialize_flags();
 322   void initialize_size_info();
 323 
 324   void assert_flags();
 325   void assert_size_info();
 326 
 327  public:
 328   TwoGenerationCollectorPolicy() : GenCollectorPolicy(), _min_gen1_size(0),
 329     _initial_gen1_size(0), _max_gen1_size(0) {}
 330 
 331   // Accessors
 332   size_t min_gen1_size()     { return _min_gen1_size; }
 333   size_t initial_gen1_size() { return _initial_gen1_size; }
 334   size_t max_gen1_size()     { return _max_gen1_size; }
 335 
 336   // Inherited methods
 337   TwoGenerationCollectorPolicy* as_two_generation_policy() { return this; }
 338 
 339   int number_of_generations()          { return 2; }
 340   BarrierSet::Name barrier_set_name()  { return BarrierSet::CardTableModRef; }
 341 
 342   virtual CollectorPolicy::Name kind() {
 343     return CollectorPolicy::TwoGenerationCollectorPolicyKind;
 344   }
 345 
 346   // Returns true if gen0 sizes were adjusted
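       // (Illustrative intent only, not the exact algorithm: if *gen0_size_ptr
       //  plus *gen1_size_ptr no longer fits in heap_size, gen0 is shrunk so
       //  that at least min_gen1_size is left for gen1, and true is returned.)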
 347   bool adjust_gen0_sizes(size_t* gen0_size_ptr, size_t* gen1_size_ptr,
 348                          const size_t heap_size, const size_t min_gen1_size);
 349 };
 350 
 351 class MarkSweepPolicy : public TwoGenerationCollectorPolicy {
 352  protected:
 353   void initialize_generations();
 354 
 355  public:
 356   MarkSweepPolicy() {}
 357 
 358   MarkSweepPolicy* as_mark_sweep_policy() { return this; }
 359 
 360   void initialize_gc_policy_counters();
 361 };
 362 
 363 #endif // SHARE_VM_MEMORY_COLLECTORPOLICY_HPP