199 virtual void post_heap_initialize() = 0;
200 };
201
202 class ClearedAllSoftRefs : public StackObj {
203 bool _clear_all_soft_refs;
204 CollectorPolicy* _collector_policy;
205 public:
206 ClearedAllSoftRefs(bool clear_all_soft_refs,
207 CollectorPolicy* collector_policy) :
208 _clear_all_soft_refs(clear_all_soft_refs),
209 _collector_policy(collector_policy) {}
210
211 ~ClearedAllSoftRefs() {
212 if (_clear_all_soft_refs) {
213 _collector_policy->cleared_all_soft_refs();
214 }
215 }
216 };
217
218 class GenCollectorPolicy : public CollectorPolicy {
219 friend class TestGenCollectorPolicy;
220 protected:
221 size_t _min_young_size;
222 size_t _initial_young_size;
223 size_t _max_young_size;
224 size_t _min_old_size;
225 size_t _initial_old_size;
226 size_t _max_old_size;
227
228 // _gen_alignment and _space_alignment will have the same value most of the
229 // time. When using large pages they can differ.
230 size_t _gen_alignment;
231
232 GenerationSpec **_generations;
233
234 // Return true if an allocation should be attempted in the older generation
235 // if it fails in the younger generation. Return false, otherwise.
236 virtual bool should_try_older_generation_allocation(size_t word_size) const;
237
238 void initialize_flags();
239 void initialize_size_info();
240
241 DEBUG_ONLY(void assert_flags();)
242 DEBUG_ONLY(void assert_size_info();)
243
244 // Try to allocate space by expanding the heap.
245 virtual HeapWord* expand_heap_and_allocate(size_t size, bool is_tlab);
246
247 // Compute max heap alignment.
248 size_t compute_max_alignment();
249
250 // Scale the base_size by NewRatio according to
251 // result = base_size / (NewRatio + 1)
252 // and align by min_alignment()
253 size_t scale_by_NewRatio_aligned(size_t base_size);
254
255 // Bound the value by the given maximum minus the min_alignment.
256 size_t bound_minus_alignment(size_t desired_size, size_t maximum_size);
257
258 public:
259 GenCollectorPolicy();
260
261 // Accessors
262 size_t min_young_size() { return _min_young_size; }
263 size_t initial_young_size() { return _initial_young_size; }
264 size_t max_young_size() { return _max_young_size; }
265 size_t gen_alignment() { return _gen_alignment; }
266 size_t min_old_size() { return _min_old_size; }
267 size_t initial_old_size() { return _initial_old_size; }
268 size_t max_old_size() { return _max_old_size; }
269
270 int number_of_generations() { return 2; }
271
// Returns the array of generation specifications. Only valid after
// initialization has populated _generations; the assert guards
// against use before that point.
virtual GenerationSpec **generations() {
assert(_generations != NULL, "Sanity check");
return _generations;
}
276
277 virtual GenCollectorPolicy* as_generation_policy() { return this; }
278
279 virtual void initialize_generations() { };
280
// Full policy initialization: run the base-class initialization first,
// then set up the generation specifications on top of it.
virtual void initialize_all() {
CollectorPolicy::initialize_all();
initialize_generations();
}
285
286 size_t young_gen_size_lower_bound();
287
288 HeapWord* mem_allocate_work(size_t size,
289 bool is_tlab,
290 bool* gc_overhead_limit_was_exceeded);
291
292 HeapWord *satisfy_failed_allocation(size_t size, bool is_tlab);
293
294 // Adaptive size policy
|
199 virtual void post_heap_initialize() = 0;
200 };
201
202 class ClearedAllSoftRefs : public StackObj {
203 bool _clear_all_soft_refs;
204 CollectorPolicy* _collector_policy;
205 public:
206 ClearedAllSoftRefs(bool clear_all_soft_refs,
207 CollectorPolicy* collector_policy) :
208 _clear_all_soft_refs(clear_all_soft_refs),
209 _collector_policy(collector_policy) {}
210
211 ~ClearedAllSoftRefs() {
212 if (_clear_all_soft_refs) {
213 _collector_policy->cleared_all_soft_refs();
214 }
215 }
216 };
217
218 class GenCollectorPolicy : public CollectorPolicy {
219 friend class TestGenCollectorPolicy;
220 friend class VMStructs;
221 protected:
222 size_t _min_young_size;
223 size_t _initial_young_size;
224 size_t _max_young_size;
225 size_t _min_old_size;
226 size_t _initial_old_size;
227 size_t _max_old_size;
228
229 // _gen_alignment and _space_alignment will have the same value most of the
230 // time. When using large pages they can differ.
231 size_t _gen_alignment;
232
233 GenerationSpec* _young_gen_spec;
234 GenerationSpec* _old_gen_spec;
235
236 // Return true if an allocation should be attempted in the older generation
237 // if it fails in the younger generation. Return false, otherwise.
238 virtual bool should_try_older_generation_allocation(size_t word_size) const;
239
240 void initialize_flags();
241 void initialize_size_info();
242
243 DEBUG_ONLY(void assert_flags();)
244 DEBUG_ONLY(void assert_size_info();)
245
246 // Try to allocate space by expanding the heap.
247 virtual HeapWord* expand_heap_and_allocate(size_t size, bool is_tlab);
248
249 // Compute max heap alignment.
250 size_t compute_max_alignment();
251
252 // Scale the base_size by NewRatio according to
253 // result = base_size / (NewRatio + 1)
254 // and align by min_alignment()
255 size_t scale_by_NewRatio_aligned(size_t base_size);
256
257 // Bound the value by the given maximum minus the min_alignment.
258 size_t bound_minus_alignment(size_t desired_size, size_t maximum_size);
259
260 public:
261 GenCollectorPolicy();
262
263 // Accessors
264 size_t min_young_size() { return _min_young_size; }
265 size_t initial_young_size() { return _initial_young_size; }
266 size_t max_young_size() { return _max_young_size; }
267 size_t gen_alignment() { return _gen_alignment; }
268 size_t min_old_size() { return _min_old_size; }
269 size_t initial_old_size() { return _initial_old_size; }
270 size_t max_old_size() { return _max_old_size; }
271
272 int number_of_generations() { return 2; }
273
// Returns the young generation specification. Only valid after
// initialization has set _young_gen_spec; the assert guards against
// use before that point.
virtual GenerationSpec* young_gen_spec() const {
assert(_young_gen_spec != NULL, "Sanity check");
return _young_gen_spec;
}
278
// Returns the old generation specification. Only valid after
// initialization has set _old_gen_spec; the assert guards against
// use before that point.
virtual GenerationSpec* old_gen_spec() const {
assert(_old_gen_spec != NULL, "Sanity check");
return _old_gen_spec;
}
283
284 virtual GenCollectorPolicy* as_generation_policy() { return this; }
285
286 virtual void initialize_generations() { };
287
// Full policy initialization: run the base-class initialization first,
// then set up the generation specifications on top of it.
virtual void initialize_all() {
CollectorPolicy::initialize_all();
initialize_generations();
}
292
293 size_t young_gen_size_lower_bound();
294
295 HeapWord* mem_allocate_work(size_t size,
296 bool is_tlab,
297 bool* gc_overhead_limit_was_exceeded);
298
299 HeapWord *satisfy_failed_allocation(size_t size, bool is_tlab);
300
301 // Adaptive size policy
|