176 }
177
// Record in the region attribute table whether region r's remembered set
// is currently being tracked.
178 void G1CollectedHeap::register_region_with_region_attr(HeapRegion* r) {
179 _region_attr.set_has_remset(r->hrm_index(), r->rem_set()->is_tracked());
180 }
181
// Mark region r as "old" in the region attribute table (remembering whether
// its remembered set is tracked) and exclude it from remembered-set scanning.
182 void G1CollectedHeap::register_old_region_with_region_attr(HeapRegion* r) {
183 _region_attr.set_in_old(r->hrm_index(), r->rem_set()->is_tracked());
184 _rem_set->exclude_region_from_scan(r->hrm_index());
185 }
186
// Mark region r as an optional collection-set region in the region attribute
// table, remembering whether its remembered set is tracked.
187 void G1CollectedHeap::register_optional_region_with_region_attr(HeapRegion* r) {
188 _region_attr.set_optional(r->hrm_index(), r->rem_set()->is_tracked());
189 }
190
191 #ifndef PRODUCT
192 // Support for G1EvacuationFailureALot
193
194 inline bool
195 G1CollectedHeap::evacuation_failure_alot_for_gc_type(bool for_young_gc,
196 bool during_initial_mark,
197 bool mark_or_rebuild_in_progress) {
198 bool res = false;
199 if (mark_or_rebuild_in_progress) {
200 res |= G1EvacuationFailureALotDuringConcMark;
201 }
202 if (during_initial_mark) {
203 res |= G1EvacuationFailureALotDuringInitialMark;
204 }
205 if (for_young_gc) {
206 res |= G1EvacuationFailureALotDuringYoungGC;
207 } else {
208 // GCs are mixed
209 res |= G1EvacuationFailureALotDuringMixedGC;
210 }
211 return res;
212 }
213
// Decide whether evacuation-failure injection should be armed for the
// upcoming GC: it is armed when at least G1EvacuationFailureALotInterval
// GCs have elapsed since _evacuation_failure_alot_gc_number was recorded,
// and injection is enabled for this GC's type.
214 inline void
215 G1CollectedHeap::set_evacuation_failure_alot_for_current_gc() {
216 if (G1EvacuationFailureALot) {
217 // Note we can't assert that _evacuation_failure_alot_for_current_gc
218 // is clear here. It may have been set during a previous GC but that GC
219 // did not copy enough objects (i.e. G1EvacuationFailureALotCount) to
220 // trigger an evacuation failure and clear the flags and counts.
221
222 // Check if we have gone over the interval.
223 const size_t gc_num = total_collections();
224 const size_t elapsed_gcs = gc_num - _evacuation_failure_alot_gc_number;
225
226 _evacuation_failure_alot_for_current_gc = (elapsed_gcs >= G1EvacuationFailureALotInterval);
227
228 // Now check if G1EvacuationFailureALot is enabled for the current GC type.
229 const bool in_young_only_phase = collector_state()->in_young_only_phase();
230 const bool in_initial_mark_gc = collector_state()->in_initial_mark_gc();
231 const bool mark_or_rebuild_in_progress = collector_state()->mark_or_rebuild_in_progress();
232
233 _evacuation_failure_alot_for_current_gc &=
234 evacuation_failure_alot_for_gc_type(in_young_only_phase,
235 in_initial_mark_gc,
236 mark_or_rebuild_in_progress);
237 }
238 }
239
240 inline bool G1CollectedHeap::evacuation_should_fail() {
241 if (!G1EvacuationFailureALot || !_evacuation_failure_alot_for_current_gc) {
242 return false;
243 }
244 // G1EvacuationFailureALot is in effect for current GC
245 // Access to _evacuation_failure_alot_count is not atomic;
246 // the value does not have to be exact.
247 if (++_evacuation_failure_alot_count < G1EvacuationFailureALotCount) {
248 return false;
249 }
250 _evacuation_failure_alot_count = 0;
251 return true;
252 }
253
254 inline void G1CollectedHeap::reset_evacuation_should_fail() {
255 if (G1EvacuationFailureALot) {
|
176 }
177
// Record in the region attribute table whether region r's remembered set
// is currently being tracked.
178 void G1CollectedHeap::register_region_with_region_attr(HeapRegion* r) {
179 _region_attr.set_has_remset(r->hrm_index(), r->rem_set()->is_tracked());
180 }
181
// Mark region r as "old" in the region attribute table (remembering whether
// its remembered set is tracked) and exclude it from remembered-set scanning.
182 void G1CollectedHeap::register_old_region_with_region_attr(HeapRegion* r) {
183 _region_attr.set_in_old(r->hrm_index(), r->rem_set()->is_tracked());
184 _rem_set->exclude_region_from_scan(r->hrm_index());
185 }
186
// Mark region r as an optional collection-set region in the region attribute
// table, remembering whether its remembered set is tracked.
187 void G1CollectedHeap::register_optional_region_with_region_attr(HeapRegion* r) {
188 _region_attr.set_optional(r->hrm_index(), r->rem_set()->is_tracked());
189 }
190
191 #ifndef PRODUCT
192 // Support for G1EvacuationFailureALot
193
194 inline bool
195 G1CollectedHeap::evacuation_failure_alot_for_gc_type(bool for_young_gc,
196 bool during_concurrent_start,
197 bool mark_or_rebuild_in_progress) {
198 bool res = false;
199 if (mark_or_rebuild_in_progress) {
200 res |= G1EvacuationFailureALotDuringConcMark;
201 }
202 if (during_concurrent_start) {
203 res |= G1EvacuationFailureALotDuringInitialMark;
204 }
205 if (for_young_gc) {
206 res |= G1EvacuationFailureALotDuringYoungGC;
207 } else {
208 // GCs are mixed
209 res |= G1EvacuationFailureALotDuringMixedGC;
210 }
211 return res;
212 }
213
// Decide whether evacuation-failure injection should be armed for the
// upcoming GC: it is armed when at least G1EvacuationFailureALotInterval
// GCs have elapsed since _evacuation_failure_alot_gc_number was recorded,
// and injection is enabled for this GC's type.
214 inline void
215 G1CollectedHeap::set_evacuation_failure_alot_for_current_gc() {
216 if (G1EvacuationFailureALot) {
217 // Note we can't assert that _evacuation_failure_alot_for_current_gc
218 // is clear here. It may have been set during a previous GC but that GC
219 // did not copy enough objects (i.e. G1EvacuationFailureALotCount) to
220 // trigger an evacuation failure and clear the flags and counts.
221
222 // Check if we have gone over the interval.
223 const size_t gc_num = total_collections();
224 const size_t elapsed_gcs = gc_num - _evacuation_failure_alot_gc_number;
225
226 _evacuation_failure_alot_for_current_gc = (elapsed_gcs >= G1EvacuationFailureALotInterval);
227
228 // Now check if G1EvacuationFailureALot is enabled for the current GC type.
229 const bool in_young_only_phase = collector_state()->in_young_only_phase();
230 const bool in_concurrent_start_gc = collector_state()->in_concurrent_start_gc();
231 const bool mark_or_rebuild_in_progress = collector_state()->mark_or_rebuild_in_progress();
232
233 _evacuation_failure_alot_for_current_gc &=
234 evacuation_failure_alot_for_gc_type(in_young_only_phase,
235 in_concurrent_start_gc,
236 mark_or_rebuild_in_progress);
237 }
238 }
239
240 inline bool G1CollectedHeap::evacuation_should_fail() {
241 if (!G1EvacuationFailureALot || !_evacuation_failure_alot_for_current_gc) {
242 return false;
243 }
244 // G1EvacuationFailureALot is in effect for current GC
245 // Access to _evacuation_failure_alot_count is not atomic;
246 // the value does not have to be exact.
247 if (++_evacuation_failure_alot_count < G1EvacuationFailureALotCount) {
248 return false;
249 }
250 _evacuation_failure_alot_count = 0;
251 return true;
252 }
253
254 inline void G1CollectedHeap::reset_evacuation_should_fail() {
255 if (G1EvacuationFailureALot) {
|