164 }
165
166 G1HeapRegionAttr G1CollectedHeap::region_attr(const void* addr) const {
167 return _region_attr.at((HeapWord*)addr);
168 }
169
// Return the region attribute table entry for the region with the given index.
G1HeapRegionAttr G1CollectedHeap::region_attr(uint idx) const {
  return _region_attr.get_by_index(idx);
}
173
174 void G1CollectedHeap::register_humongous_region_with_region_attr(uint index) {
175 _region_attr.set_humongous(index, region_at(index)->rem_set()->is_tracked());
176 }
177
178 void G1CollectedHeap::register_region_with_region_attr(HeapRegion* r) {
179 _region_attr.set_has_remset(r->hrm_index(), r->rem_set()->is_tracked());
180 }
181
182 void G1CollectedHeap::register_old_region_with_region_attr(HeapRegion* r) {
183 _region_attr.set_in_old(r->hrm_index(), r->rem_set()->is_tracked());
184 _rem_set->prepare_for_scan_heap_roots(r->hrm_index());
185 }
186
187 void G1CollectedHeap::register_optional_region_with_region_attr(HeapRegion* r) {
188 _region_attr.set_optional(r->hrm_index(), r->rem_set()->is_tracked());
189 }
190
191 #ifndef PRODUCT
192 // Support for G1EvacuationFailureALot
193
194 inline bool
195 G1CollectedHeap::evacuation_failure_alot_for_gc_type(bool for_young_gc,
196 bool during_initial_mark,
197 bool mark_or_rebuild_in_progress) {
198 bool res = false;
199 if (mark_or_rebuild_in_progress) {
200 res |= G1EvacuationFailureALotDuringConcMark;
201 }
202 if (during_initial_mark) {
203 res |= G1EvacuationFailureALotDuringInitialMark;
204 }
280 }
281 return is_obj_ill(obj, heap_region_containing(obj));
282 }
283
284 inline bool G1CollectedHeap::is_obj_dead_full(const oop obj, const HeapRegion* hr) const {
285 return !is_marked_next(obj) && !hr->is_archive();
286 }
287
288 inline bool G1CollectedHeap::is_obj_dead_full(const oop obj) const {
289 return is_obj_dead_full(obj, heap_region_containing(obj));
290 }
291
// Mark or unmark the humongous object starting in `region` as a candidate
// for eager reclaim. Only valid for "starts humongous" regions.
inline void G1CollectedHeap::set_humongous_reclaim_candidate(uint region, bool value) {
  assert(_hrm->at(region)->is_starts_humongous(), "Must start a humongous object");
  _humongous_reclaim_candidates.set_candidate(region, value);
}
296
// Query whether the humongous object starting in `region` is currently a
// candidate for eager reclaim. Only valid for "starts humongous" regions.
inline bool G1CollectedHeap::is_humongous_reclaim_candidate(uint region) {
  assert(_hrm->at(region)->is_starts_humongous(), "Must start a humongous object");
  return _humongous_reclaim_candidates.is_candidate(region);
}
301
302 inline void G1CollectedHeap::set_humongous_is_live(oop obj) {
303 uint region = addr_to_region((HeapWord*)obj);
304 // Clear the flag in the humongous_reclaim_candidates table. Also
305 // reset the entry in the region attribute table so that subsequent references
306 // to the same humongous object do not go into the slow path again.
307 // This is racy, as multiple threads may at the same time enter here, but this
308 // is benign.
309 // During collection we only ever clear the "candidate" flag, and only ever clear the
310 // entry in the in_cset_fast_table.
311 // We only ever evaluate the contents of these tables (in the VM thread) after
312 // having synchronized the worker threads with the VM thread, or in the same
313 // thread (i.e. within the VM thread).
314 if (is_humongous_reclaim_candidate(region)) {
315 set_humongous_reclaim_candidate(region, false);
316 _region_attr.clear_humongous(region);
317 }
318 }
319
|
164 }
165
166 G1HeapRegionAttr G1CollectedHeap::region_attr(const void* addr) const {
167 return _region_attr.at((HeapWord*)addr);
168 }
169
// Return the region attribute table entry for the region with the given index.
G1HeapRegionAttr G1CollectedHeap::region_attr(uint idx) const {
  return _region_attr.get_by_index(idx);
}
173
174 void G1CollectedHeap::register_humongous_region_with_region_attr(uint index) {
175 _region_attr.set_humongous(index, region_at(index)->rem_set()->is_tracked());
176 }
177
178 void G1CollectedHeap::register_region_with_region_attr(HeapRegion* r) {
179 _region_attr.set_has_remset(r->hrm_index(), r->rem_set()->is_tracked());
180 }
181
182 void G1CollectedHeap::register_old_region_with_region_attr(HeapRegion* r) {
183 _region_attr.set_in_old(r->hrm_index(), r->rem_set()->is_tracked());
184 _rem_set->exclude_region_from_scan(r->hrm_index());
185 }
186
187 void G1CollectedHeap::register_optional_region_with_region_attr(HeapRegion* r) {
188 _region_attr.set_optional(r->hrm_index(), r->rem_set()->is_tracked());
189 }
190
191 #ifndef PRODUCT
192 // Support for G1EvacuationFailureALot
193
194 inline bool
195 G1CollectedHeap::evacuation_failure_alot_for_gc_type(bool for_young_gc,
196 bool during_initial_mark,
197 bool mark_or_rebuild_in_progress) {
198 bool res = false;
199 if (mark_or_rebuild_in_progress) {
200 res |= G1EvacuationFailureALotDuringConcMark;
201 }
202 if (during_initial_mark) {
203 res |= G1EvacuationFailureALotDuringInitialMark;
204 }
280 }
281 return is_obj_ill(obj, heap_region_containing(obj));
282 }
283
284 inline bool G1CollectedHeap::is_obj_dead_full(const oop obj, const HeapRegion* hr) const {
285 return !is_marked_next(obj) && !hr->is_archive();
286 }
287
288 inline bool G1CollectedHeap::is_obj_dead_full(const oop obj) const {
289 return is_obj_dead_full(obj, heap_region_containing(obj));
290 }
291
// Mark or unmark the humongous object starting in `region` as a candidate
// for eager reclaim. Only valid for "starts humongous" regions.
inline void G1CollectedHeap::set_humongous_reclaim_candidate(uint region, bool value) {
  assert(_hrm->at(region)->is_starts_humongous(), "Must start a humongous object");
  _humongous_reclaim_candidates.set_candidate(region, value);
}
296
// Query whether the humongous object starting in `region` is currently a
// candidate for eager reclaim. Only valid for "starts humongous" regions.
inline bool G1CollectedHeap::is_humongous_reclaim_candidate(uint region) {
  assert(_hrm->at(region)->is_starts_humongous(), "Must start a humongous object");
  return _humongous_reclaim_candidates.is_candidate(region);
}
301
// Setter for the flag recording whether any humongous reclaim candidates
// exist. NOTE(review): callers/timing not visible here — presumably updated
// once candidates have been identified; verify against call sites.
inline void G1CollectedHeap::set_has_humongous_reclaim_candidate(bool value) {
  _has_humongous_reclaim_candidates = value;
}
305
306 inline void G1CollectedHeap::set_humongous_is_live(oop obj) {
307 uint region = addr_to_region((HeapWord*)obj);
308 // Clear the flag in the humongous_reclaim_candidates table. Also
309 // reset the entry in the region attribute table so that subsequent references
310 // to the same humongous object do not go into the slow path again.
311 // This is racy, as multiple threads may at the same time enter here, but this
312 // is benign.
313 // During collection we only ever clear the "candidate" flag, and only ever clear the
314 // entry in the in_cset_fast_table.
315 // We only ever evaluate the contents of these tables (in the VM thread) after
316 // having synchronized the worker threads with the VM thread, or in the same
317 // thread (i.e. within the VM thread).
318 if (is_humongous_reclaim_candidate(region)) {
319 set_humongous_reclaim_candidate(region, false);
320 _region_attr.clear_humongous(region);
321 }
322 }
323
|