 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/g1Allocator.inline.hpp"
#include "gc/g1/g1AllocRegion.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectorPolicy.hpp"
#include "gc/g1/g1MarkSweep.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionSet.inline.hpp"

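// Default allocator: a single mutator alloc region plus one survivor and one
// old GC alloc region per pause. The old GC alloc region may be retained
// across pauses so that its unused tail can be reused by the next evacuation.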
G1DefaultAllocator::G1DefaultAllocator(G1CollectedHeap* heap) :
  G1Allocator(heap),
  _retained_old_gc_alloc_region(NULL),
  _survivor_gc_alloc_region(heap->alloc_buffer_stats(InCSetState::Young)),
  _old_gc_alloc_region(heap->alloc_buffer_stats(InCSetState::Old)) {
}

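// The mutator alloc region is the region application threads currently
// allocate into (and carve TLABs out of) between pauses. init() arms it;
// release() retires it, e.g. before a collection.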
void G1DefaultAllocator::init_mutator_alloc_region() {
  assert(_mutator_alloc_region.get() == NULL, "pre-condition");
  _mutator_alloc_region.init();
}

void G1DefaultAllocator::release_mutator_alloc_region() {
  _mutator_alloc_region.release();
  assert(_mutator_alloc_region.get() == NULL, "post-condition");
}

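// Try to install the old region retained from the previous pause as the
// initial old GC alloc region, so that its remaining space is not wasted.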
void G1Allocator::reuse_retained_old_region(EvacuationInfo& evacuation_info,
                                            OldGCAllocRegion* old,
                                            HeapRegion** retained_old) {
  HeapRegion* retained_region = *retained_old;
  *retained_old = NULL;

  // We will discard the current GC alloc region if:
  // a) it's in the collection set (it can happen!),
  // b) it's already full (no point in using it),
  // c) it's empty (this means that it was emptied during
  // a cleanup and it should be on the free list now), or
  // d) it's humongous (this means that it was emptied
  // during a cleanup and was added to the free list, but
  // was subsequently used to allocate a humongous object that
  // may be less than the region size).
  if (retained_region != NULL &&
      !retained_region->in_collection_set() &&
      !(retained_region->top() == retained_region->end()) &&
      !retained_region->is_empty() &&
      !retained_region->is_humongous()) {
    retained_region->record_timestamp();
    // The retained region was added to the old region set when it was
    // retired. We have to remove it now, since we don't allow regions
    // we allocate to in the region sets. We'll re-add it later, when
    // it's retired again.
    _g1h->old_set_remove(retained_region);
    bool during_im = _g1h->collector_state()->during_initial_mark_pause();
    retained_region->note_start_of_copying(during_im);
    old->set(retained_region);
    _g1h->hr_printer()->reuse(retained_region);
    evacuation_info.set_alloc_regions_used_before(retained_region->used());
  }
}

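// Called at the start of an evacuation pause (at a safepoint) to activate
// the survivor and old GC alloc regions.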
void G1DefaultAllocator::init_gc_alloc_regions(EvacuationInfo& evacuation_info) {
  assert_at_safepoint(true /* should_be_vm_thread */);

  G1Allocator::init_gc_alloc_regions(evacuation_info);

  _survivor_gc_alloc_region.init();
  _old_gc_alloc_region.init();
  reuse_retained_old_region(evacuation_info,
                            &_old_gc_alloc_region,
                            &_retained_old_gc_alloc_region);
}

void G1DefaultAllocator::release_gc_alloc_regions(EvacuationInfo& evacuation_info) {
  AllocationContext_t context = AllocationContext::current();
  evacuation_info.set_allocation_regions(survivor_gc_alloc_region(context)->count() +
                                         old_gc_alloc_region(context)->count());
  survivor_gc_alloc_region(context)->release();
  // If we have an old GC alloc region to release, we'll save it in
  // _retained_old_gc_alloc_region. If we don't, _retained_old_gc_alloc_region
  // will become NULL. This is what we want either way, so there is no reason
  // to check explicitly for either condition.
  _retained_old_gc_alloc_region = old_gc_alloc_region(context)->release();
  if (_retained_old_gc_alloc_region != NULL) {
    _retained_old_gc_alloc_region->record_retained_region();
  }
}

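// Forget the retained old region without flushing it back; the GC alloc
// regions themselves must already be inactive (e.g. after a full GC).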
void G1DefaultAllocator::abandon_gc_alloc_regions() {
  assert(survivor_gc_alloc_region(AllocationContext::current())->get() == NULL, "pre-condition");
  assert(old_gc_alloc_region(AllocationContext::current())->get() == NULL, "pre-condition");
  _retained_old_gc_alloc_region = NULL;
}

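// A G1PLAB starts out retired, i.e. it has no buffer yet, so the first
// allocation attempt falls through to the slow path that refills it.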
G1PLAB::G1PLAB(size_t gclab_word_size) :
  PLAB(gclab_word_size), _retired(true) { }

size_t G1Allocator::unsafe_max_tlab_alloc(AllocationContext_t context) {
  // Return the remaining space in the current alloc region, but not less
  // than the min TLAB size.

  // Also, this value can be at most the humongous object threshold, since
  // we can't allow TLABs to grow big enough to accommodate humongous
  // objects.

  HeapRegion* hr = mutator_alloc_region(context)->get();
  size_t max_tlab = _g1h->max_tlab_size() * wordSize;
  if (hr == NULL) {
    return max_tlab;
  } else {
    return MIN2(MAX2(hr->free(), (size_t) MinTLABSize), max_tlab);
  }
}

HeapWord* G1Allocator::par_allocate_during_gc(InCSetState dest,
                                              size_t word_size,
                                              AllocationContext_t context) {
  size_t temp = 0;
  HeapWord* result = par_allocate_during_gc(dest, word_size, word_size, &temp, context);
  assert(result == NULL || temp == word_size,
         "Requested " SIZE_FORMAT " words, but got " SIZE_FORMAT " at " PTR_FORMAT,
         word_size, temp, p2i(result));
  return result;
}

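// Allocate space for an object copied during GC, directing the request to
// the survivor or old GC alloc region based on the destination state.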
HeapWord* G1Allocator::par_allocate_during_gc(InCSetState dest,
                                              size_t min_word_size,
                                              size_t desired_word_size,
                                              size_t* actual_word_size,
                                              AllocationContext_t context) {
  switch (dest.value()) {
    case InCSetState::Young:
      return survivor_attempt_allocation(min_word_size, desired_word_size, actual_word_size, context);
    case InCSetState::Old:
      return old_attempt_allocation(min_word_size, desired_word_size, actual_word_size, context);
    default:
      ShouldNotReachHere();
      return NULL; // Keep some compilers happy
  }
}

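// Once a destination (survivor or old) has been found full during the
// current pause, its flag short-circuits further locked allocation attempts.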
bool G1Allocator::survivor_is_full(AllocationContext_t context) const {
  return _survivor_is_full;
}

bool G1Allocator::old_is_full(AllocationContext_t context) const {
  return _old_is_full;
}

void G1Allocator::set_survivor_full(AllocationContext_t context) {
  _survivor_is_full = true;
}

void G1Allocator::set_old_full(AllocationContext_t context) {
  _old_is_full = true;
}

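// Fast path: lock-free allocation in the active survivor region. Slow path:
// take FreeList_lock and retry, possibly refilling the region; mark the
// survivor space full if even that fails.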
HeapWord* G1Allocator::survivor_attempt_allocation(size_t min_word_size,
                                                   size_t desired_word_size,
                                                   size_t* actual_word_size,
                                                   AllocationContext_t context) {
  assert(!_g1h->is_humongous(desired_word_size),
         "we should not be seeing humongous-size allocations in this path");

  HeapWord* result = survivor_gc_alloc_region(context)->attempt_allocation(min_word_size,
                                                                           desired_word_size,
                                                                           actual_word_size,
                                                                           false /* bot_updates */);
  if (result == NULL && !survivor_is_full(context)) {
    MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
    result = survivor_gc_alloc_region(context)->attempt_allocation_locked(min_word_size,
                                                                          desired_word_size,
                                                                          actual_word_size,
                                                                          false /* bot_updates */);
    if (result == NULL) {
      set_survivor_full(context);
    }
  }
  if (result != NULL) {
    _g1h->dirty_young_block(result, *actual_word_size);
  }
  return result;
}

HeapWord* G1Allocator::old_attempt_allocation(size_t min_word_size,
                                              size_t desired_word_size,
                                              size_t* actual_word_size,
                                              AllocationContext_t context) {
  assert(!_g1h->is_humongous(desired_word_size),
         "we should not be seeing humongous-size allocations in this path");

  HeapWord* result = old_gc_alloc_region(context)->attempt_allocation(min_word_size,
                                                                      desired_word_size,
                                                                      actual_word_size,
                                                                      true /* bot_updates */);
  if (result == NULL && !old_is_full(context)) {
    MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
    result = old_gc_alloc_region(context)->attempt_allocation_locked(min_word_size,
                                                                     desired_word_size,
                                                                     actual_word_size,
                                                                     true /* bot_updates */);
    if (result == NULL) {
      set_old_full(context);
    }
  }
  return result;
}

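// Base-class part of GC alloc region initialization: clear the full flags
// for the new pause. Subclasses activate their alloc regions on top of this.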
void G1Allocator::init_gc_alloc_regions(EvacuationInfo& evacuation_info) {
  _survivor_is_full = false;
  _old_is_full = false;
}

G1PLABAllocator::G1PLABAllocator(G1Allocator* allocator) :
  _g1h(G1CollectedHeap::heap()),
  _allocator(allocator),
  _survivor_alignment_bytes(calc_survivor_alignment_bytes()) {
  for (size_t i = 0; i < ARRAY_SIZE(_direct_allocated); i++) {
    _direct_allocated[i] = 0;
  }
}

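// The current PLAB may be retired for a new one only if the allocation that
// did not fit consumes less than ParallelGCBufferWastePct percent of a full
// buffer; the cross-multiplication keeps the check in integer arithmetic.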
bool G1PLABAllocator::may_throw_away_buffer(size_t const allocation_word_sz, size_t const buffer_size) const {
  return (allocation_word_sz * 100 < buffer_size * ParallelGCBufferWastePct);
}

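// Slow path when the current PLAB cannot satisfy word_sz: either retire it
// and allocate a fresh PLAB, or allocate the object directly in the heap.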
HeapWord* G1PLABAllocator::allocate_direct_or_new_plab(InCSetState dest,
                                                       size_t word_sz,
                                                       AllocationContext_t context,
                                                       bool* plab_refill_failed) {
  size_t plab_word_size = G1CollectedHeap::heap()->desired_plab_sz(dest);