    case InCSetState::Old:
      return &_old_evac_stats;
    default:
      ShouldNotReachHere();
      return NULL; // Keep some compilers happy
  }
}

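// Compute the desired PLAB size for allocations into the given destination,
// capped so that a PLAB can never itself become a humongous allocation.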
size_t G1CollectedHeap::desired_plab_sz(InCSetState dest) {
  size_t gclab_word_size = alloc_buffer_stats(dest)->desired_plab_sz(workers()->active_workers());
  // Prevent humongous PLAB sizes for two reasons:
  // * PLABs are allocated using a similar path as oops, but should
  //   never be in a humongous region
  // * Allowing humongous PLABs needlessly churns the region free lists
  return MIN2(_humongous_object_threshold_in_words, gclab_word_size);
}

// Inline functions for G1CollectedHeap

// Return the region with the given index. It assumes the index is valid.
inline HeapRegion* G1CollectedHeap::region_at(uint index) const { return _hrm.at(index); }

inline HeapRegion* G1CollectedHeap::next_region_in_humongous(HeapRegion* hr) const {
  return _hrm.next_region_in_humongous(hr);
}

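// Return the index of the region containing the given address: the byte
// offset from the start of the reserved heap, shifted by the log of the
// region size.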
inline uint G1CollectedHeap::addr_to_region(HeapWord* addr) const {
  assert(is_in_reserved(addr),
         "Cannot calculate region index for address " PTR_FORMAT " that is outside of the heap [" PTR_FORMAT ", " PTR_FORMAT ")",
         p2i(addr), p2i(reserved_region().start()), p2i(reserved_region().end()));
  return (uint)(pointer_delta(addr, reserved_region().start(), sizeof(uint8_t)) >> HeapRegion::LogOfHRGrainBytes);
}

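// Return the bottom address of the region with the given index.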
inline HeapWord* G1CollectedHeap::bottom_addr_for_region(uint index) const {
  return _hrm.reserved().start() + index * HeapRegion::GrainWords;
}

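// Return the region containing the given address, which must be within the
// reserved heap.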
template <class T>
inline HeapRegion* G1CollectedHeap::heap_region_containing(const T addr) const {
  assert(addr != NULL, "invariant");
  assert(is_in_g1_reserved((const void*) addr),
         "Address " PTR_FORMAT " is outside of the heap [" PTR_FORMAT ", " PTR_FORMAT ")",
         p2i((void*)addr), p2i(g1_reserved().start()), p2i(g1_reserved().end()));
  return _hrm.addr_to_region((HeapWord*) addr);
}

inline void G1CollectedHeap::old_set_add(HeapRegion* hr) {
  _old_set.add(hr);
}

inline void G1CollectedHeap::old_set_remove(HeapRegion* hr) {
  _old_set.remove(hr);
}

inline void G1CollectedHeap::archive_set_add(HeapRegion* hr) {
  _archive_set.add(hr);
}

// Dirty the cards that cover the block so that the post
// write barrier never queues anything when updating objects on this
// block. It is assumed (and in fact we assert) that the block
// belongs to a young region.
inline void
G1CollectedHeap::dirty_young_block(HeapWord* start, size_t word_size) {
  // ... (body elided in this excerpt) ...
}

// ... (intervening definitions elided in this excerpt) ...

inline bool G1CollectedHeap::is_obj_dead(const oop obj) const {
  if (obj == NULL) {
    return false;
  }
  return is_obj_dead(obj, heap_region_containing(obj));
}

inline bool G1CollectedHeap::is_obj_ill(const oop obj) const {
  if (obj == NULL) {
    return false;
  }
  return is_obj_ill(obj, heap_region_containing(obj));
}

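// During a full GC an object is dead if it is not marked on the next bitmap
// and is not in an archive region.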
inline bool G1CollectedHeap::is_obj_dead_full(const oop obj, const HeapRegion* hr) const {
  return !is_marked_next(obj) && !hr->is_archive();
}

inline bool G1CollectedHeap::is_obj_dead_full(const oop obj) const {
  return is_obj_dead_full(obj, heap_region_containing(obj));
}

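// Set or query whether the humongous object starting at the given region is a
// candidate for eager reclaim during GC.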
inline void G1CollectedHeap::set_humongous_reclaim_candidate(uint region, bool value) {
  assert(_hrm.at(region)->is_starts_humongous(), "Must start a humongous object");
  _humongous_reclaim_candidates.set_candidate(region, value);
}

inline bool G1CollectedHeap::is_humongous_reclaim_candidate(uint region) {
  assert(_hrm.at(region)->is_starts_humongous(), "Must start a humongous object");
  return _humongous_reclaim_candidates.is_candidate(region);
}

inline void G1CollectedHeap::set_humongous_is_live(oop obj) {
  uint region = addr_to_region((HeapWord*)obj);
  // Clear the flag in the humongous_reclaim_candidates table. Also
  // reset the entry in the _in_cset_fast_test table so that subsequent references
  // to the same humongous object do not go into the slow path again.
  // This is racy, as multiple threads may enter here at the same time, but this
  // is benign.
  // During collection we only ever clear the "candidate" flag, and only ever clear the
  // entry in the _in_cset_fast_test table.
  // We only ever evaluate the contents of these tables (in the VM thread) after
  // having synchronized the worker threads with the VM thread, or in the same
  // thread (i.e. within the VM thread).
  if (is_humongous_reclaim_candidate(region)) {
    set_humongous_reclaim_candidate(region, false);
    _in_cset_fast_test.clear_humongous(region);
  }
}