59
60 class CollectorPolicy : public CHeapObj<mtGC> {
61 protected:
62 GCPolicyCounters* _gc_policy_counters;
63
64 virtual void initialize_alignments() = 0;
65 virtual void initialize_flags();
66 virtual void initialize_size_info();
67
68 DEBUG_ONLY(virtual void assert_flags();)
69 DEBUG_ONLY(virtual void assert_size_info();)
70
71 size_t _initial_heap_byte_size;
72 size_t _max_heap_byte_size;
73 size_t _min_heap_byte_size;
74
75 size_t _space_alignment;
76 size_t _heap_alignment;
77
78 // Needed to keep information if MaxHeapSize was set on the command line
79 // when the flag value is aligned etc by ergonomics
80 bool _max_heap_size_cmdline;
81
82 // The sizing of the heap is controlled by a sizing policy.
83 AdaptiveSizePolicy* _size_policy;
84
85 // Set to true when policy wants soft refs cleared.
86 // Reset to false by gc after it clears all soft refs.
87 bool _should_clear_all_soft_refs;
88
89 // Set to true by the GC if the just-completed gc cleared all
90 // softrefs. This is set to true whenever a gc clears all softrefs, and
91 // set to false each time gc returns to the mutator. For example, in the
92 // ParallelScavengeHeap case the latter would be done toward the end of
93 // mem_allocate() where it returns op.result()
94 bool _all_soft_refs_clear;
95
96 CollectorPolicy();
97
98 public:
// Template method driving the full initialization sequence in its
// required order: alignments first (initialize_alignments is pure
// virtual, so subclasses supply them), then flag processing, then
// the size info derived from the flags.
99 virtual void initialize_all() {
100 initialize_alignments();
101 initialize_flags();
102 initialize_size_info();
103 }
104
105 // Return maximum heap alignment that may be imposed by the policy.
106 static size_t compute_heap_alignment();
107
108 size_t space_alignment() { return _space_alignment; }
109 size_t heap_alignment() { return _heap_alignment; }
110
111 size_t initial_heap_byte_size() { return _initial_heap_byte_size; }
112 size_t max_heap_byte_size() { return _max_heap_byte_size; }
113 size_t min_heap_byte_size() { return _min_heap_byte_size; }
114
115 enum Name {
116 CollectorPolicyKind,
117 TwoGenerationCollectorPolicyKind,
118 ConcurrentMarkSweepPolicyKind,
119 ASConcurrentMarkSweepPolicyKind,
120 G1CollectorPolicyKind
121 };
122
123 AdaptiveSizePolicy* size_policy() { return _size_policy; }
124 bool should_clear_all_soft_refs() { return _should_clear_all_soft_refs; }
125 void set_should_clear_all_soft_refs(bool v) { _should_clear_all_soft_refs = v; }
163
164 // This method controls how a collector satisfies a request
165 // for a block of memory. "gc_overhead_limit_was_exceeded" will
166 // be set to true if the adaptive size policy determines that
167 // an excessive amount of time is being spent doing collections
168 // and caused a NULL to be returned. If a NULL is not returned,
169 // "gc_overhead_limit_was_exceeded" has an undefined meaning.
170 virtual HeapWord* mem_allocate_work(size_t size,
171 bool is_tlab,
172 bool* gc_overhead_limit_was_exceeded) = 0;
173
174 // This method controls how a collector handles one or more
175 // of its generations being fully allocated.
176 virtual HeapWord *satisfy_failed_allocation(size_t size, bool is_tlab) = 0;
177 // This method controls how a collector handles a metadata allocation
178 // failure.
179 virtual MetaWord* satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
180 size_t size,
181 Metaspace::MetadataType mdtype);
182
183 // Performance Counter support
184 GCPolicyCounters* counters() { return _gc_policy_counters; }
185
186 // Create the jstat counters for the GC policy. By default, policies
187 // don't have associated counters, and we complain if this is invoked.
188 virtual void initialize_gc_policy_counters() {
189 ShouldNotReachHere(); // base class has no counters; a policy with counters must override
190 }
191
// Runtime type tag (see enum Name); the base class reports the
// generic kind, and concrete policies override to identify themselves.
192 virtual CollectorPolicy::Name kind() {
193 return CollectorPolicy::CollectorPolicyKind;
194 }
195
196 // Returns true if a collector has eden space with soft end.
197 virtual bool has_soft_ended_eden() {
198 return false;
199 }
200
201 // Do any updates required to global flags that are due to heap initialization
202 // changes
203 virtual void post_heap_initialize() = 0;
214
// NOTE(review): the enclosing class declaration is outside this view.
// On scope exit, notifies the policy only when this scope was
// responsible for clearing all soft refs — presumably so the policy
// can record _all_soft_refs_clear; confirm against the class header.
215 ~ClearedAllSoftRefs() {
216 if (_clear_all_soft_refs) {
217 _collector_policy->cleared_all_soft_refs();
218 }
219 }
220 };
221
222 class GenCollectorPolicy : public CollectorPolicy {
223 protected:
224 size_t _min_gen0_size;
225 size_t _initial_gen0_size;
226 size_t _max_gen0_size;
227
228 // _gen_alignment and _space_alignment will have the same value most of the
229 // time. When using large pages they can differ.
230 size_t _gen_alignment;
231
232 GenerationSpec **_generations;
233
234 // Return true if an allocation should be attempted in the older
235 // generation if it fails in the younger generation. Return
236 // false, otherwise.
237 virtual bool should_try_older_generation_allocation(size_t word_size) const;
238
239 void initialize_flags();
240 void initialize_size_info();
241
242 DEBUG_ONLY(void assert_flags();)
243 DEBUG_ONLY(void assert_size_info();)
244
245 // Try to allocate space by expanding the heap.
246 virtual HeapWord* expand_heap_and_allocate(size_t size, bool is_tlab);
247
248 // Compute max heap alignment
249 size_t compute_max_alignment();
250
251 // Scale the base_size by NewRatio according to
252 // result = base_size / (NewRatio + 1)
253 // and align by min_alignment()
254 size_t scale_by_NewRatio_aligned(size_t base_size);
255
256 // Bound the value by the given maximum minus the min_alignment
257 size_t bound_minus_alignment(size_t desired_size, size_t maximum_size);
258
259 public:
260 GenCollectorPolicy();
261
262 // Accessors
263 size_t min_gen0_size() { return _min_gen0_size; }
264 size_t initial_gen0_size() { return _initial_gen0_size; }
265 size_t max_gen0_size() { return _max_gen0_size; }
266 size_t gen_alignment() { return _gen_alignment; }
267
268 virtual int number_of_generations() = 0;
269
// Accessor for the generation spec array; asserts that it has been
// populated (i.e. initialization has run) before handing it out.
269 virtual GenerationSpec **generations() {
270 assert(_generations != NULL, "Sanity check");
271 return _generations;
272 }
274
275 virtual GenCollectorPolicy* as_generation_policy() { return this; }
276
|
59
60 class CollectorPolicy : public CHeapObj<mtGC> {
61 protected:
62 GCPolicyCounters* _gc_policy_counters;
63
64 virtual void initialize_alignments() = 0;
65 virtual void initialize_flags();
66 virtual void initialize_size_info();
67
68 DEBUG_ONLY(virtual void assert_flags();)
69 DEBUG_ONLY(virtual void assert_size_info();)
70
71 size_t _initial_heap_byte_size;
72 size_t _max_heap_byte_size;
73 size_t _min_heap_byte_size;
74
75 size_t _space_alignment;
76 size_t _heap_alignment;
77
78 // Needed to keep information if MaxHeapSize was set on the command line
79 // when the flag value is aligned etc by ergonomics.
80 bool _max_heap_size_cmdline;
81
82 // The sizing of the heap is controlled by a sizing policy.
83 AdaptiveSizePolicy* _size_policy;
84
85 // Set to true when policy wants soft refs cleared.
86 // Reset to false by gc after it clears all soft refs.
87 bool _should_clear_all_soft_refs;
88
89 // Set to true by the GC if the just-completed gc cleared all
90 // softrefs. This is set to true whenever a gc clears all softrefs, and
91 // set to false each time gc returns to the mutator. For example, in the
92 // ParallelScavengeHeap case the latter would be done toward the end of
93 // mem_allocate() where it returns op.result()
94 bool _all_soft_refs_clear;
95
96 CollectorPolicy();
97
98 public:
99 virtual void initialize_all() {
100 initialize_alignments();
101 initialize_flags();
102 initialize_size_info();
103 }
104
105 // Return maximum heap alignment that may be imposed by the policy.
106 static size_t compute_heap_alignment();
107
108 size_t space_alignment() { return _space_alignment; }
109 size_t heap_alignment() { return _heap_alignment; }
110
111 size_t initial_heap_byte_size() { return _initial_heap_byte_size; }
112 size_t max_heap_byte_size() { return _max_heap_byte_size; }
113 size_t min_heap_byte_size() { return _min_heap_byte_size; }
114
115 enum Name {
116 CollectorPolicyKind,
117 TwoGenerationCollectorPolicyKind,
118 ConcurrentMarkSweepPolicyKind,
119 ASConcurrentMarkSweepPolicyKind,
120 G1CollectorPolicyKind
121 };
122
123 AdaptiveSizePolicy* size_policy() { return _size_policy; }
124 bool should_clear_all_soft_refs() { return _should_clear_all_soft_refs; }
125 void set_should_clear_all_soft_refs(bool v) { _should_clear_all_soft_refs = v; }
163
164 // This method controls how a collector satisfies a request
165 // for a block of memory. "gc_overhead_limit_was_exceeded" will
166 // be set to true if the adaptive size policy determines that
167 // an excessive amount of time is being spent doing collections
168 // and caused a NULL to be returned. If a NULL is not returned,
169 // "gc_overhead_limit_was_exceeded" has an undefined meaning.
170 virtual HeapWord* mem_allocate_work(size_t size,
171 bool is_tlab,
172 bool* gc_overhead_limit_was_exceeded) = 0;
173
174 // This method controls how a collector handles one or more
175 // of its generations being fully allocated.
176 virtual HeapWord *satisfy_failed_allocation(size_t size, bool is_tlab) = 0;
177 // This method controls how a collector handles a metadata allocation
178 // failure.
179 virtual MetaWord* satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
180 size_t size,
181 Metaspace::MetadataType mdtype);
182
183 // Performance Counter support
184 GCPolicyCounters* counters() { return _gc_policy_counters; }
185
186 // Create the jstat counters for the GC policy. By default, policies
187 // don't have associated counters, and we complain if this is invoked.
188 virtual void initialize_gc_policy_counters() {
189 ShouldNotReachHere();
190 }
191
192 virtual CollectorPolicy::Name kind() {
193 return CollectorPolicy::CollectorPolicyKind;
194 }
195
196 // Returns true if a collector has eden space with soft end.
197 virtual bool has_soft_ended_eden() {
198 return false;
199 }
200
201 // Do any updates required to global flags that are due to heap initialization
202 // changes
203 virtual void post_heap_initialize() = 0;
214
215 ~ClearedAllSoftRefs() {
216 if (_clear_all_soft_refs) {
217 _collector_policy->cleared_all_soft_refs();
218 }
219 }
220 };
221
222 class GenCollectorPolicy : public CollectorPolicy {
223 protected:
224 size_t _min_gen0_size;
225 size_t _initial_gen0_size;
226 size_t _max_gen0_size;
227
228 // _gen_alignment and _space_alignment will have the same value most of the
229 // time. When using large pages they can differ.
230 size_t _gen_alignment;
231
232 GenerationSpec **_generations;
233
234 // Return true if an allocation should be attempted in the older generation
235 // if it fails in the younger generation. Return false, otherwise.
236 virtual bool should_try_older_generation_allocation(size_t word_size) const;
237
238 void initialize_flags();
239 void initialize_size_info();
240
241 DEBUG_ONLY(void assert_flags();)
242 DEBUG_ONLY(void assert_size_info();)
243
244 // Try to allocate space by expanding the heap.
245 virtual HeapWord* expand_heap_and_allocate(size_t size, bool is_tlab);
246
247 // Compute max heap alignment.
248 size_t compute_max_alignment();
249
250 // Scale the base_size by NewRatio according to
251 // result = base_size / (NewRatio + 1)
252 // and align by min_alignment()
253 size_t scale_by_NewRatio_aligned(size_t base_size);
254
255 // Bound the value by the given maximum minus the min_alignment.
256 size_t bound_minus_alignment(size_t desired_size, size_t maximum_size);
257
258 public:
259 GenCollectorPolicy();
260
261 // Accessors
262 size_t min_gen0_size() { return _min_gen0_size; }
263 size_t initial_gen0_size() { return _initial_gen0_size; }
264 size_t max_gen0_size() { return _max_gen0_size; }
265 size_t gen_alignment() { return _gen_alignment; }
266
267 virtual int number_of_generations() = 0;
268
269 virtual GenerationSpec **generations() {
270 assert(_generations != NULL, "Sanity check");
271 return _generations;
272 }
273
274 virtual GenCollectorPolicy* as_generation_policy() { return this; }
275
|