// Performance counters for this policy (creation/installation not
// shown in this excerpt).
GCPolicyCounters* _gc_policy_counters;

// Requires that the concrete subclass sets the alignment constraints
// before calling.
virtual void initialize_flags();
virtual void initialize_size_info();

// Heap sizing bounds, in bytes, derived from the command-line flags.
size_t _initial_heap_byte_size;
size_t _max_heap_byte_size;
size_t _min_heap_byte_size;

// Alignment constraints on heap sizes/boundaries; defaulted to 1 by the
// constructor and set by the concrete subclass (see note above
// initialize_flags()).
size_t _min_alignment;
size_t _max_alignment;

// The sizing of the heap is controlled by a sizing policy.
AdaptiveSizePolicy* _size_policy;

// Set to true when policy wants soft refs cleared.
// Reset to false by gc after it clears all soft refs.
bool _should_clear_all_soft_refs;
// Set to true by the GC if the just-completed gc cleared all
// softrefs. This is set to true whenever a gc clears all softrefs, and
// set to false each time gc returns to the mutator. For example, in the
// ParallelScavengeHeap case the latter would be done toward the end of
// mem_allocate() where it returns op.result()
bool _all_soft_refs_clear;
89 CollectorPolicy() :
90 _min_alignment(1),
91 _max_alignment(1),
92 _initial_heap_byte_size(0),
93 _max_heap_byte_size(0),
94 _min_heap_byte_size(0),
95 _size_policy(NULL),
96 _should_clear_all_soft_refs(false),
97 _all_soft_refs_clear(false)
98 {}
99
public:
// Return maximum heap alignment that may be imposed by the policy
// Checked-downcast helpers: each returns NULL here; the matching
// subclass overrides it to return "this".
virtual TwoGenerationCollectorPolicy* as_two_generation_policy() { return NULL; }
virtual MarkSweepPolicy* as_mark_sweep_policy() { return NULL; }
#if INCLUDE_ALL_GCS
virtual ConcurrentMarkSweepPolicy* as_concurrent_mark_sweep_policy() { return NULL; }
virtual G1CollectorPolicy* as_g1_policy() { return NULL; }
#endif // INCLUDE_ALL_GCS
// Type queries built on the downcast helpers above.
// Note that these are not virtual.
bool is_generation_policy() { return as_generation_policy() != NULL; }
bool is_two_generation_policy() { return as_two_generation_policy() != NULL; }
bool is_mark_sweep_policy() { return as_mark_sweep_policy() != NULL; }
#if INCLUDE_ALL_GCS
bool is_concurrent_mark_sweep_policy() { return as_concurrent_mark_sweep_policy() != NULL; }
bool is_g1_policy() { return as_g1_policy() != NULL; }
#else // INCLUDE_ALL_GCS
// Without INCLUDE_ALL_GCS these collectors do not exist in the build.
bool is_concurrent_mark_sweep_policy() { return false; }
bool is_g1_policy() { return false; }
#endif // INCLUDE_ALL_GCS
151
152
// Identify the barrier-set and remembered-set implementations this
// policy requires; supplied by the concrete policy.
virtual BarrierSet::Name barrier_set_name() = 0;
virtual GenRemSet::Name rem_set_name() = 0;

// Create the remembered set (to cover the given reserved region,
// allowing breaking up into at most "max_covered_regions").
virtual GenRemSet* create_rem_set(MemRegion reserved,
                                  int max_covered_regions);

// This method controls how a collector satisfies a request
// for a block of memory. "gc_overhead_limit_was_exceeded" will
// be set to true if the adaptive size policy determines that
// an excessive amount of time is being spent doing collections
// and caused a NULL to be returned. If a NULL is not returned,
// "gc_overhead_limit_was_exceeded" has an undefined meaning.
virtual HeapWord* mem_allocate_work(size_t size,
                                    bool is_tlab,
                                    bool* gc_overhead_limit_was_exceeded) = 0;

// This method controls how a collector handles one or more
// of its generations being fully allocated.
virtual HeapWord *satisfy_failed_allocation(size_t size, bool is_tlab) = 0;
// This method controls how a collector handles a metadata allocation
// This policy IS a generation policy, so the downcast succeeds.
virtual GenCollectorPolicy* as_generation_policy() { return this; }

// Hook for the concrete policy to create its generations.
virtual void initialize_generations() = 0;

// Full initialization sequence: flags first, then the size info
// derived from them, then the generations.
virtual void initialize_all() {
  initialize_flags();
  initialize_size_info();
  initialize_generations();
}

HeapWord* mem_allocate_work(size_t size,
                            bool is_tlab,
                            bool* gc_overhead_limit_was_exceeded);

HeapWord *satisfy_failed_allocation(size_t size, bool is_tlab);

// Adaptive size policy
virtual void initialize_size_policy(size_t init_eden_size,
                                    size_t init_promo_size,
                                    size_t init_survivor_size);
};
278
279 // All of hotspot's current collectors are subtypes of this
280 // class. Currently, these collectors all use the same gen[0],
281 // but have different gen[1] types. If we add another subtype
282 // of CollectorPolicy, this class should be broken out into
283 // its own file.
284
class TwoGenerationCollectorPolicy : public GenCollectorPolicy {
 protected:
  // Sizing bounds for the second generation (gen1), in bytes.
  size_t _min_gen1_size;
  size_t _initial_gen1_size;
  size_t _max_gen1_size;

  void initialize_flags();
  void initialize_size_info();
  // Not used by this policy; guarded so any call is a hard error.
  void initialize_generations() { ShouldNotReachHere(); }

 public:
  // Accessors
  size_t min_gen1_size() { return _min_gen1_size; }
  size_t initial_gen1_size() { return _initial_gen1_size; }
  size_t max_gen1_size() { return _max_gen1_size; }

  // Inherited methods
  TwoGenerationCollectorPolicy* as_two_generation_policy() { return this; }

  int number_of_generations() { return 2; }
  BarrierSet::Name barrier_set_name() { return BarrierSet::CardTableModRef; }
  GenRemSet::Name rem_set_name() { return GenRemSet::CardTable; }

  virtual CollectorPolicy::Name kind() {
    return CollectorPolicy::TwoGenerationCollectorPolicyKind;
  }

  // Returns true if gen0 sizes were adjusted.
  bool adjust_gen0_sizes(size_t* gen0_size_ptr, size_t* gen1_size_ptr,
                         const size_t heap_size, const size_t min_gen1_size);
};
316
// Two-generation policy specialized for mark-sweep collection.
class MarkSweepPolicy : public TwoGenerationCollectorPolicy {
 protected:
  void initialize_generations();

 public:
  MarkSweepPolicy();

  // This policy IS a mark-sweep policy, so the downcast succeeds.
  MarkSweepPolicy* as_mark_sweep_policy() { return this; }

  void initialize_gc_policy_counters();
|
// Performance counters for this policy (creation/installation not
// shown in this excerpt).
GCPolicyCounters* _gc_policy_counters;

// Requires that the concrete subclass sets the alignment constraints
// before calling.
virtual void initialize_flags();
virtual void initialize_size_info();

// Heap sizing bounds, in bytes, derived from the command-line flags.
size_t _initial_heap_byte_size;
size_t _max_heap_byte_size;
size_t _min_heap_byte_size;

// Alignment constraints on heap sizes/boundaries; defaulted to 1 by the
// constructor and set by the concrete subclass (see note above
// initialize_flags()).
size_t _min_alignment;
size_t _max_alignment;

// The sizing of the heap is controlled by a sizing policy.
AdaptiveSizePolicy* _size_policy;

// Set to true when policy wants soft refs cleared.
// Reset to false by gc after it clears all soft refs.
bool _should_clear_all_soft_refs;

// Set to true by the GC if the just-completed gc cleared all
// softrefs. This is set to true whenever a gc clears all softrefs, and
// set to false each time gc returns to the mutator. For example, in the
// ParallelScavengeHeap case the latter would be done toward the end of
// mem_allocate() where it returns op.result()
bool _all_soft_refs_clear;

90 CollectorPolicy() :
91 _min_alignment(1),
92 _max_alignment(1),
93 _initial_heap_byte_size(0),
94 _max_heap_byte_size(0),
95 _min_heap_byte_size(0),
96 _size_policy(NULL),
97 _should_clear_all_soft_refs(false),
98 _all_soft_refs_clear(false)
99 {}
100
public:
// Return maximum heap alignment that may be imposed by the policy
// Checked-downcast helpers: each returns NULL here; the matching
// subclass overrides it to return "this".
virtual TwoGenerationCollectorPolicy* as_two_generation_policy() { return NULL; }
virtual MarkSweepPolicy* as_mark_sweep_policy() { return NULL; }
#if INCLUDE_ALL_GCS
virtual ConcurrentMarkSweepPolicy* as_concurrent_mark_sweep_policy() { return NULL; }
virtual G1CollectorPolicy* as_g1_policy() { return NULL; }
#endif // INCLUDE_ALL_GCS
// Type queries built on the downcast helpers above.
// Note that these are not virtual.
bool is_generation_policy() { return as_generation_policy() != NULL; }
bool is_two_generation_policy() { return as_two_generation_policy() != NULL; }
bool is_mark_sweep_policy() { return as_mark_sweep_policy() != NULL; }
#if INCLUDE_ALL_GCS
bool is_concurrent_mark_sweep_policy() { return as_concurrent_mark_sweep_policy() != NULL; }
bool is_g1_policy() { return as_g1_policy() != NULL; }
#else // INCLUDE_ALL_GCS
// Without INCLUDE_ALL_GCS these collectors do not exist in the build.
bool is_concurrent_mark_sweep_policy() { return false; }
bool is_g1_policy() { return false; }
#endif // INCLUDE_ALL_GCS
152
153
// Identify the barrier-set implementation this policy requires;
// supplied by the concrete policy.
virtual BarrierSet::Name barrier_set_name() = 0;

// Create the remembered set (to cover the given reserved region,
// allowing breaking up into at most "max_covered_regions").
virtual GenRemSet* create_rem_set(MemRegion reserved,
                                  int max_covered_regions);

// This method controls how a collector satisfies a request
// for a block of memory. "gc_overhead_limit_was_exceeded" will
// be set to true if the adaptive size policy determines that
// an excessive amount of time is being spent doing collections
// and caused a NULL to be returned. If a NULL is not returned,
// "gc_overhead_limit_was_exceeded" has an undefined meaning.
virtual HeapWord* mem_allocate_work(size_t size,
                                    bool is_tlab,
                                    bool* gc_overhead_limit_was_exceeded) = 0;

// This method controls how a collector handles one or more
// of its generations being fully allocated.
virtual HeapWord *satisfy_failed_allocation(size_t size, bool is_tlab) = 0;
// This method controls how a collector handles a metadata allocation
// This policy IS a generation policy, so the downcast succeeds.
virtual GenCollectorPolicy* as_generation_policy() { return this; }

// Hook for the concrete policy to create its generations.
virtual void initialize_generations() = 0;

// Full initialization sequence: flags first, then the size info
// derived from them, then the generations.
virtual void initialize_all() {
  initialize_flags();
  initialize_size_info();
  initialize_generations();
}

HeapWord* mem_allocate_work(size_t size,
                            bool is_tlab,
                            bool* gc_overhead_limit_was_exceeded);

HeapWord *satisfy_failed_allocation(size_t size, bool is_tlab);

// Adaptive size policy
virtual void initialize_size_policy(size_t init_eden_size,
                                    size_t init_promo_size,
                                    size_t init_survivor_size);

// The alignment used for eden and survivors within the young gen
// and for the boundary between young gen and old gen.
static size_t intra_heap_alignment() {
  return 64 * K * HeapWordSize;
}
};
284
285 // All of hotspot's current collectors are subtypes of this
286 // class. Currently, these collectors all use the same gen[0],
287 // but have different gen[1] types. If we add another subtype
288 // of CollectorPolicy, this class should be broken out into
289 // its own file.
290
class TwoGenerationCollectorPolicy : public GenCollectorPolicy {
 protected:
  // Sizing bounds for the second generation (gen1), in bytes.
  size_t _min_gen1_size;
  size_t _initial_gen1_size;
  size_t _max_gen1_size;

  void initialize_flags();
  void initialize_size_info();
  // Not used by this policy; guarded so any call is a hard error.
  void initialize_generations() { ShouldNotReachHere(); }

 public:
  // Accessors
  size_t min_gen1_size() { return _min_gen1_size; }
  size_t initial_gen1_size() { return _initial_gen1_size; }
  size_t max_gen1_size() { return _max_gen1_size; }

  // Inherited methods
  TwoGenerationCollectorPolicy* as_two_generation_policy() { return this; }

  int number_of_generations() { return 2; }
  BarrierSet::Name barrier_set_name() { return BarrierSet::CardTableModRef; }

  virtual CollectorPolicy::Name kind() {
    return CollectorPolicy::TwoGenerationCollectorPolicyKind;
  }

  // Returns true if gen0 sizes were adjusted.
  bool adjust_gen0_sizes(size_t* gen0_size_ptr, size_t* gen1_size_ptr,
                         const size_t heap_size, const size_t min_gen1_size);
};
321
// Two-generation policy specialized for mark-sweep collection.
class MarkSweepPolicy : public TwoGenerationCollectorPolicy {
 protected:
  void initialize_generations();

 public:
  MarkSweepPolicy();

  // This policy IS a mark-sweep policy, so the downcast succeeds.
  MarkSweepPolicy* as_mark_sweep_policy() { return this; }

  void initialize_gc_policy_counters();
|