29
// Statics shared by every G1AllocRegion instance; both are assigned exactly
// once, in G1AllocRegion::setup().
G1CollectedHeap* G1AllocRegion::_g1h = NULL;
HeapRegion* G1AllocRegion::_dummy_region = NULL;
32
// One-time static initialization: records the heap and the shared dummy
// region that an alloc region points at while it has no active region.
// The dummy region advertises zero free space, so every allocation attempt
// against it fails without any further checks.
void G1AllocRegion::setup(G1CollectedHeap* g1h, HeapRegion* dummy_region) {
  assert(_dummy_region == NULL, "should be set once");
  assert(dummy_region != NULL, "pre-condition");
  assert(dummy_region->free() == 0, "pre-condition");

  // Make sure that any allocation attempt on this region will fail
  // and will not trigger any asserts.
  // (These allocation calls live inside asserts and therefore only run in
  // debug builds; they must have no side effects on the dummy region.)
  assert(allocate(dummy_region, 1, false) == NULL, "should fail");
  assert(par_allocate(dummy_region, 1, false) == NULL, "should fail");
  assert(allocate(dummy_region, 1, true) == NULL, "should fail");
  assert(par_allocate(dummy_region, 1, true) == NULL, "should fail");

  _g1h = g1h;
  _dummy_region = dummy_region;
}
48
49 void G1AllocRegion::fill_up_remaining_space(HeapRegion* alloc_region,
50 bool bot_updates) {
51 assert(alloc_region != NULL && alloc_region != _dummy_region,
52 "pre-condition");
53
54 // Other threads might still be trying to allocate using a CAS out
55 // of the region we are trying to retire, as they can do so without
56 // holding the lock. So, we first have to make sure that noone else
57 // can allocate out of it by doing a maximal allocation. Even if our
58 // CAS attempt fails a few times, we'll succeed sooner or later
59 // given that failed CAS attempts mean that the region is getting
60 // closed to being full.
61 size_t free_word_size = alloc_region->free() / HeapWordSize;
62
63 // This is the minimum free chunk we can turn into a dummy
64 // object. If the free space falls below this, then noone can
65 // allocate in this region anyway (all allocation requests will be
66 // of a size larger than this) so we won't have to perform the dummy
67 // allocation.
68 size_t min_word_size_to_fill = CollectedHeap::min_fill_size();
69
70 while (free_word_size >= min_word_size_to_fill) {
71 HeapWord* dummy = par_allocate(alloc_region, free_word_size, bot_updates);
72 if (dummy != NULL) {
73 // If the allocation was successful we should fill in the space.
74 CollectedHeap::fill_with_object(dummy, free_word_size);
75 alloc_region->set_pre_dummy_top(dummy);
76 break;
77 }
78
79 free_word_size = alloc_region->free() / HeapWordSize;
80 // It's also possible that someone else beats us to the
81 // allocation and they fill up the region. In that case, we can
82 // just get out of the loop.
83 }
84 assert(alloc_region->free() / HeapWordSize < min_word_size_to_fill,
85 "post-condition");
86 }
87
88 void G1AllocRegion::retire(bool fill_up) {
89 assert(_alloc_region != NULL, ar_ext_msg(this, "not initialized properly"));
90
91 trace("retiring");
92 HeapRegion* alloc_region = _alloc_region;
93 if (alloc_region != _dummy_region) {
94 // We never have to check whether the active region is empty or not,
95 // and potentially free it if it is, given that it's guaranteed that
96 // it will never be empty.
97 assert(!alloc_region->is_empty(),
98 ar_ext_msg(this, "the alloc region should never be empty"));
99
100 if (fill_up) {
101 fill_up_remaining_space(alloc_region, _bot_updates);
102 }
103
104 assert(alloc_region->used() >= _used_bytes_before,
105 ar_ext_msg(this, "invariant"));
106 size_t allocated_bytes = alloc_region->used() - _used_bytes_before;
107 retire_region(alloc_region, allocated_bytes);
108 _used_bytes_before = 0;
109 _alloc_region = _dummy_region;
110 }
111 trace("retired");
112 }
113
114 HeapWord* G1AllocRegion::new_alloc_region_and_allocate(size_t word_size,
115 bool force) {
116 assert(_alloc_region == _dummy_region, ar_ext_msg(this, "pre-condition"));
117 assert(_used_bytes_before == 0, ar_ext_msg(this, "pre-condition"));
118
119 trace("attempting region allocation");
120 HeapRegion* new_alloc_region = allocate_new_region(word_size, force);
121 if (new_alloc_region != NULL) {
122 new_alloc_region->reset_pre_dummy_top();
123 // Need to do this before the allocation
124 _used_bytes_before = new_alloc_region->used();
125 HeapWord* result = allocate(new_alloc_region, word_size, _bot_updates);
126 assert(result != NULL, ar_ext_msg(this, "the allocation should succeeded"));
127
128 OrderAccess::storestore();
129 // Note that we first perform the allocation and then we store the
130 // region in _alloc_region. This is the reason why an active region
131 // can never be empty.
234 }
235 #endif // G1_ALLOC_REGION_TRACING
236
// Constructor: every instance starts with no active region (_alloc_region
// is NULL here; presumably it is pointed at the shared dummy region during
// later initialization — that code is not visible in this chunk).
G1AllocRegion::G1AllocRegion(const char* name,
                             bool bot_updates)
  : _name(name), _bot_updates(bot_updates),
    _alloc_region(NULL), _count(0), _used_bytes_before(0),
    _allocation_context(AllocationContext::system()) { }
242
243
// Asks the heap for a fresh region to serve mutator (application thread)
// allocation; the 'force' flag is forwarded unchanged to the heap.
HeapRegion* MutatorAllocRegion::allocate_new_region(size_t word_size,
                                                    bool force) {
  return _g1h->new_mutator_alloc_region(word_size, force);
}
248
// Hands a finished mutator region back to the heap, reporting how many
// bytes were allocated in it through this alloc region.
void MutatorAllocRegion::retire_region(HeapRegion* alloc_region,
                                       size_t allocated_bytes) {
  _g1h->retire_mutator_alloc_region(alloc_region, allocated_bytes);
}
253
// Requests a new GC allocation region tagged as young (survivor space).
// Forced allocation is not supported for GC alloc regions.
HeapRegion* SurvivorGCAllocRegion::allocate_new_region(size_t word_size,
                                                       bool force) {
  assert(!force, "not supported for GC alloc regions");
  return _g1h->new_gc_alloc_region(word_size, count(), InCSetState::Young);
}
259
// Returns a finished survivor GC allocation region to the heap, tagged
// young, along with the byte count allocated through this alloc region.
void SurvivorGCAllocRegion::retire_region(HeapRegion* alloc_region,
                                          size_t allocated_bytes) {
  _g1h->retire_gc_alloc_region(alloc_region, allocated_bytes, InCSetState::Young);
}
264
// Requests a new GC allocation region tagged as old generation.
// Forced allocation is not supported for GC alloc regions.
HeapRegion* OldGCAllocRegion::allocate_new_region(size_t word_size,
                                                  bool force) {
  assert(!force, "not supported for GC alloc regions");
  return _g1h->new_gc_alloc_region(word_size, count(), InCSetState::Old);
}
270
// Returns a finished old GC allocation region to the heap, tagged old,
// along with the byte count allocated through this alloc region.
void OldGCAllocRegion::retire_region(HeapRegion* alloc_region,
                                     size_t allocated_bytes) {
  _g1h->retire_gc_alloc_region(alloc_region, allocated_bytes, InCSetState::Old);
}
275
276 HeapRegion* OldGCAllocRegion::release() {
277 HeapRegion* cur = get();
278 if (cur != NULL) {
279 // Determine how far we are from the next card boundary. If it is smaller than
280 // the minimum object size we can allocate into, expand into the next card.
281 HeapWord* top = cur->top();
282 HeapWord* aligned_top = (HeapWord*)align_ptr_up(top, G1BlockOffsetSharedArray::N_bytes);
283
284 size_t to_allocate_words = pointer_delta(aligned_top, top, HeapWordSize);
285
286 if (to_allocate_words != 0) {
287 // We are not at a card boundary. Fill up, possibly into the next, taking the
288 // end of the region and the minimum object size into account.
289 to_allocate_words = MIN2(pointer_delta(cur->end(), cur->top(), HeapWordSize),
290 MAX2(to_allocate_words, G1CollectedHeap::min_fill_size()));
291
292 // Skip allocation if there is not enough space to allocate even the smallest
293 // possible object. In this case this region will not be retained, so the
294 // original problem cannot occur.
295 if (to_allocate_words >= G1CollectedHeap::min_fill_size()) {
296 HeapWord* dummy = attempt_allocation(to_allocate_words, true /* bot_updates */);
297 CollectedHeap::fill_with_object(dummy, to_allocate_words);
298 }
299 }
300 }
301 return G1AllocRegion::release();
302 }
303
304
|
29
// Statics shared by every G1AllocRegion instance; both are assigned exactly
// once, in G1AllocRegion::setup().
G1CollectedHeap* G1AllocRegion::_g1h = NULL;
HeapRegion* G1AllocRegion::_dummy_region = NULL;
32
// One-time static initialization: records the heap and the shared dummy
// region that an alloc region points at while it has no active region.
// The dummy region advertises zero free space, so every allocation attempt
// against it fails without any further checks.
void G1AllocRegion::setup(G1CollectedHeap* g1h, HeapRegion* dummy_region) {
  assert(_dummy_region == NULL, "should be set once");
  assert(dummy_region != NULL, "pre-condition");
  assert(dummy_region->free() == 0, "pre-condition");

  // Make sure that any allocation attempt on this region will fail
  // and will not trigger any asserts.
  // (These allocation calls live inside asserts and therefore only run in
  // debug builds; they must have no side effects on the dummy region.)
  assert(allocate(dummy_region, 1, false) == NULL, "should fail");
  assert(par_allocate(dummy_region, 1, false) == NULL, "should fail");
  assert(allocate(dummy_region, 1, true) == NULL, "should fail");
  assert(par_allocate(dummy_region, 1, true) == NULL, "should fail");

  _g1h = g1h;
  _dummy_region = dummy_region;
}
48
49 size_t G1AllocRegion::fill_up_remaining_space(HeapRegion* alloc_region,
50 bool bot_updates) {
51 assert(alloc_region != NULL && alloc_region != _dummy_region,
52 "pre-condition");
53 size_t result = 0;
54
55 // Other threads might still be trying to allocate using a CAS out
56 // of the region we are trying to retire, as they can do so without
57 // holding the lock. So, we first have to make sure that noone else
58 // can allocate out of it by doing a maximal allocation. Even if our
59 // CAS attempt fails a few times, we'll succeed sooner or later
60 // given that failed CAS attempts mean that the region is getting
61 // closed to being full.
62 size_t free_word_size = alloc_region->free() / HeapWordSize;
63
64 // This is the minimum free chunk we can turn into a dummy
65 // object. If the free space falls below this, then noone can
66 // allocate in this region anyway (all allocation requests will be
67 // of a size larger than this) so we won't have to perform the dummy
68 // allocation.
69 size_t min_word_size_to_fill = CollectedHeap::min_fill_size();
70
71 while (free_word_size >= min_word_size_to_fill) {
72 HeapWord* dummy = par_allocate(alloc_region, free_word_size, bot_updates);
73 if (dummy != NULL) {
74 // If the allocation was successful we should fill in the space.
75 CollectedHeap::fill_with_object(dummy, free_word_size);
76 alloc_region->set_pre_dummy_top(dummy);
77 result += free_word_size * HeapWordSize;
78 break;
79 }
80
81 free_word_size = alloc_region->free() / HeapWordSize;
82 // It's also possible that someone else beats us to the
83 // allocation and they fill up the region. In that case, we can
84 // just get out of the loop.
85 }
86 result += alloc_region->free();
87
88 assert(alloc_region->free() / HeapWordSize < min_word_size_to_fill,
89 "post-condition");
90 return result;
91 }
92
93 size_t G1AllocRegion::retire(bool fill_up) {
94 assert(_alloc_region != NULL, ar_ext_msg(this, "not initialized properly"));
95
96 size_t result = 0;
97
98 trace("retiring");
99 HeapRegion* alloc_region = _alloc_region;
100 if (alloc_region != _dummy_region) {
101 // We never have to check whether the active region is empty or not,
102 // and potentially free it if it is, given that it's guaranteed that
103 // it will never be empty.
104 assert(!alloc_region->is_empty(),
105 ar_ext_msg(this, "the alloc region should never be empty"));
106
107 if (fill_up) {
108 result = fill_up_remaining_space(alloc_region, _bot_updates);
109 }
110
111 assert(alloc_region->used() >= _used_bytes_before,
112 ar_ext_msg(this, "invariant"));
113 size_t allocated_bytes = alloc_region->used() - _used_bytes_before;
114 retire_region(alloc_region, allocated_bytes);
115 _used_bytes_before = 0;
116 _alloc_region = _dummy_region;
117 }
118 trace("retired");
119
120 return result;
121 }
122
123 HeapWord* G1AllocRegion::new_alloc_region_and_allocate(size_t word_size,
124 bool force) {
125 assert(_alloc_region == _dummy_region, ar_ext_msg(this, "pre-condition"));
126 assert(_used_bytes_before == 0, ar_ext_msg(this, "pre-condition"));
127
128 trace("attempting region allocation");
129 HeapRegion* new_alloc_region = allocate_new_region(word_size, force);
130 if (new_alloc_region != NULL) {
131 new_alloc_region->reset_pre_dummy_top();
132 // Need to do this before the allocation
133 _used_bytes_before = new_alloc_region->used();
134 HeapWord* result = allocate(new_alloc_region, word_size, _bot_updates);
135 assert(result != NULL, ar_ext_msg(this, "the allocation should succeeded"));
136
137 OrderAccess::storestore();
138 // Note that we first perform the allocation and then we store the
139 // region in _alloc_region. This is the reason why an active region
140 // can never be empty.
243 }
244 #endif // G1_ALLOC_REGION_TRACING
245
// Constructor: every instance starts with no active region (_alloc_region
// is NULL here; presumably it is pointed at the shared dummy region during
// later initialization — that code is not visible in this chunk).
G1AllocRegion::G1AllocRegion(const char* name,
                             bool bot_updates)
  : _name(name), _bot_updates(bot_updates),
    _alloc_region(NULL), _count(0), _used_bytes_before(0),
    _allocation_context(AllocationContext::system()) { }
251
252
// Asks the heap for a fresh region to serve mutator (application thread)
// allocation; the 'force' flag is forwarded unchanged to the heap.
HeapRegion* MutatorAllocRegion::allocate_new_region(size_t word_size,
                                                    bool force) {
  return _g1h->new_mutator_alloc_region(word_size, force);
}
257
// Hands a finished mutator region back to the heap, reporting how many
// bytes were allocated in it through this alloc region.
void MutatorAllocRegion::retire_region(HeapRegion* alloc_region,
                                       size_t allocated_bytes) {
  _g1h->retire_mutator_alloc_region(alloc_region, allocated_bytes);
}
262
// Requests a new GC allocation region tagged with this alloc region's
// _purpose (presumably distinguishing survivor vs. old destinations — the
// declaration is not visible in this chunk). Forced allocation is not
// supported for GC alloc regions.
HeapRegion* G1GCAllocRegion::allocate_new_region(size_t word_size,
                                                 bool force) {
  assert(!force, "not supported for GC alloc regions");
  return _g1h->new_gc_alloc_region(word_size, count(), _purpose);
}
268
// Returns a finished GC allocation region to the heap, tagged with this
// alloc region's _purpose, along with the bytes allocated through it.
void G1GCAllocRegion::retire_region(HeapRegion* alloc_region,
                                    size_t allocated_bytes) {
  _g1h->retire_gc_alloc_region(alloc_region, allocated_bytes, _purpose);
}
273
274 size_t G1GCAllocRegion::retire(bool fill_up) {
275 HeapRegion* retired = get();
276 size_t end_waste = G1AllocRegion::retire(fill_up);
277 // Do not count retirement of the dummy allocation region.
278 if (retired != NULL) {
279 _stats->add_region_end_waste(end_waste / HeapWordSize);
280 }
281 return end_waste;
282 }
283
284 HeapRegion* OldGCAllocRegion::release() {
285 HeapRegion* cur = get();
286 if (cur != NULL) {
287 // Determine how far we are from the next card boundary. If it is smaller than
288 // the minimum object size we can allocate into, expand into the next card.
289 HeapWord* top = cur->top();
290 HeapWord* aligned_top = (HeapWord*)align_ptr_up(top, G1BlockOffsetSharedArray::N_bytes);
291
292 size_t to_allocate_words = pointer_delta(aligned_top, top, HeapWordSize);
293
294 if (to_allocate_words != 0) {
295 // We are not at a card boundary. Fill up, possibly into the next, taking the
296 // end of the region and the minimum object size into account.
297 to_allocate_words = MIN2(pointer_delta(cur->end(), cur->top(), HeapWordSize),
298 MAX2(to_allocate_words, G1CollectedHeap::min_fill_size()));
299
300 // Skip allocation if there is not enough space to allocate even the smallest
301 // possible object. In this case this region will not be retained, so the
302 // original problem cannot occur.
303 if (to_allocate_words >= G1CollectedHeap::min_fill_size()) {
304 HeapWord* dummy = attempt_allocation(to_allocate_words, true /* bot_updates */);
305 CollectedHeap::fill_with_object(dummy, to_allocate_words);
306 }
307 }
308 }
309 return G1AllocRegion::release();
310 }
|