11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "gc/g1/g1Allocator.inline.hpp"
27 #include "gc/g1/g1AllocRegion.inline.hpp"
28 #include "gc/g1/g1EvacStats.inline.hpp"
29 #include "gc/g1/g1EvacuationInfo.hpp"
30 #include "gc/g1/g1CollectedHeap.inline.hpp"
31 #include "gc/g1/g1Policy.hpp"
32 #include "gc/g1/heapRegion.inline.hpp"
33 #include "gc/g1/heapRegionSet.inline.hpp"
34 #include "gc/g1/heapRegionType.hpp"
35 #include "utilities/align.hpp"
36
// Construct the allocator for the given heap. The mutator alloc region
// starts out inactive; the survivor and old GC alloc regions are wired to
// the heap's PLAB allocation statistics for their respective destinations.
// No retained old region exists until after the first evacuation.
G1Allocator::G1Allocator(G1CollectedHeap* heap) :
  _g1h(heap),
  _survivor_is_full(false),
  _old_is_full(false),
  _mutator_alloc_region(),
  _survivor_gc_alloc_region(heap->alloc_buffer_stats(G1HeapRegionAttr::Young)),
  _old_gc_alloc_region(heap->alloc_buffer_stats(G1HeapRegionAttr::Old)),
  _retained_old_gc_alloc_region(NULL) {
}
46
47 void G1Allocator::init_mutator_alloc_region() {
48 assert(_mutator_alloc_region.get() == NULL, "pre-condition");
49 _mutator_alloc_region.init();
50 }
51
52 void G1Allocator::release_mutator_alloc_region() {
53 _mutator_alloc_region.release();
54 assert(_mutator_alloc_region.get() == NULL, "post-condition");
55 }
56
57 bool G1Allocator::is_retained_old_region(HeapRegion* hr) {
58 return _retained_old_gc_alloc_region == hr;
59 }
60
61 void G1Allocator::reuse_retained_old_region(G1EvacuationInfo& evacuation_info,
62 OldGCAllocRegion* old,
63 HeapRegion** retained_old) {
64 HeapRegion* retained_region = *retained_old;
65 *retained_old = NULL;
66 assert(retained_region == NULL || !retained_region->is_archive(),
67 "Archive region should not be alloc region (index %u)", retained_region->hrm_index());
68
69 // We will discard the current GC alloc region if:
70 // a) it's in the collection set (it can happen!),
71 // b) it's already full (no point in using it),
72 // c) it's empty (this means that it was emptied during
73 // a cleanup and it should be on the free list now), or
74 // d) it's humongous (this means that it was emptied
// Whether allocation of new old-gen GC alloc regions has failed this GC.
bool G1Allocator::old_is_full() const {
  return _old_is_full;
}
132
// Record that no further survivor GC alloc regions can be obtained this GC.
void G1Allocator::set_survivor_full() {
  _survivor_is_full = true;
}
136
// Record that no further old GC alloc regions can be obtained this GC.
void G1Allocator::set_old_full() {
  _old_is_full = true;
}
140
141 size_t G1Allocator::unsafe_max_tlab_alloc() {
142 // Return the remaining space in the cur alloc region, but not less than
143 // the min TLAB size.
144
145 // Also, this value can be at most the humongous object threshold,
146 // since we can't allow tlabs to grow big enough to accommodate
147 // humongous objects.
148
149 HeapRegion* hr = mutator_alloc_region()->get();
150 size_t max_tlab = _g1h->max_tlab_size() * wordSize;
151 if (hr == NULL) {
152 return max_tlab;
153 } else {
154 return MIN2(MAX2(hr->free(), (size_t) MinTLABSize), max_tlab);
155 }
156 }
157
158 size_t G1Allocator::used_in_alloc_regions() {
159 assert(Heap_lock->owner() != NULL, "Should be owned on this thread's behalf.");
160 return mutator_alloc_region()->used_in_alloc_regions();
161 }
162
163
164 HeapWord* G1Allocator::par_allocate_during_gc(G1HeapRegionAttr dest,
165 size_t word_size) {
166 size_t temp = 0;
167 HeapWord* result = par_allocate_during_gc(dest, word_size, word_size, &temp);
168 assert(result == NULL || temp == word_size,
169 "Requested " SIZE_FORMAT " words, but got " SIZE_FORMAT " at " PTR_FORMAT,
170 word_size, temp, p2i(result));
171 return result;
172 }
173
174 HeapWord* G1Allocator::par_allocate_during_gc(G1HeapRegionAttr dest,
175 size_t min_word_size,
176 size_t desired_word_size,
177 size_t* actual_word_size) {
178 switch (dest.type()) {
179 case G1HeapRegionAttr::Young:
180 return survivor_attempt_allocation(min_word_size, desired_word_size, actual_word_size);
|
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "gc/g1/g1Allocator.inline.hpp"
27 #include "gc/g1/g1AllocRegion.inline.hpp"
28 #include "gc/g1/g1EvacStats.inline.hpp"
29 #include "gc/g1/g1EvacuationInfo.hpp"
30 #include "gc/g1/g1CollectedHeap.inline.hpp"
31 #include "gc/g1/g1NUMA.hpp"
32 #include "gc/g1/g1Policy.hpp"
33 #include "gc/g1/heapRegion.inline.hpp"
34 #include "gc/g1/heapRegionSet.inline.hpp"
35 #include "gc/g1/heapRegionType.hpp"
36 #include "utilities/align.hpp"
37
// Construct the allocator. One MutatorAllocRegion is created per active
// NUMA node, placement-new'ed into a raw C-heap array (see ~G1Allocator
// for the matching explicit destruction). The survivor and old GC alloc
// regions are wired to the heap's PLAB allocation statistics.
G1Allocator::G1Allocator(G1CollectedHeap* heap) :
  _g1h(heap),
  _numa(heap->numa()),
  _survivor_is_full(false),
  _old_is_full(false),
  _num_alloc_regions(_numa->num_active_nodes()),
  _mutator_alloc_regions(NULL),
  _survivor_gc_alloc_region(heap->alloc_buffer_stats(G1HeapRegionAttr::Young)),
  _old_gc_alloc_region(heap->alloc_buffer_stats(G1HeapRegionAttr::Old)),
  _retained_old_gc_alloc_region(NULL) {

  // Construct each region in place, tagged with its NUMA node index.
  _mutator_alloc_regions = NEW_C_HEAP_ARRAY(MutatorAllocRegion, _num_alloc_regions, mtGC);
  for (uint i = 0; i < _num_alloc_regions; i++) {
    ::new(_mutator_alloc_regions + i) MutatorAllocRegion(i);
  }
}
54
G1Allocator::~G1Allocator() {
  // The regions were placement-new'ed in the constructor, so run their
  // destructors explicitly before freeing the raw C-heap array.
  for (uint i = 0; i < _num_alloc_regions; i++) {
    _mutator_alloc_regions[i].~MutatorAllocRegion();
  }
  FREE_C_HEAP_ARRAY(MutatorAllocRegion, _mutator_alloc_regions);
}
61
#ifdef ASSERT
// Debug-only: is there an active mutator alloc region for the current
// thread's NUMA node?
bool G1Allocator::has_mutator_alloc_region() {
  return mutator_alloc_region(current_node_index())->get() != NULL;
}
#endif
68
69 void G1Allocator::init_mutator_alloc_regions() {
70 for (uint i = 0; i < _num_alloc_regions; i++) {
71 assert(mutator_alloc_region(i)->get() == NULL, "pre-condition");
72 mutator_alloc_region(i)->init();
73 }
74 }
75
76 void G1Allocator::release_mutator_alloc_regions() {
77 for (uint i = 0; i < _num_alloc_regions; i++) {
78 mutator_alloc_region(i)->release();
79 assert(mutator_alloc_region(i)->get() == NULL, "post-condition");
80 }
81 }
82
83 bool G1Allocator::is_retained_old_region(HeapRegion* hr) {
84 return _retained_old_gc_alloc_region == hr;
85 }
86
87 void G1Allocator::reuse_retained_old_region(G1EvacuationInfo& evacuation_info,
88 OldGCAllocRegion* old,
89 HeapRegion** retained_old) {
90 HeapRegion* retained_region = *retained_old;
91 *retained_old = NULL;
92 assert(retained_region == NULL || !retained_region->is_archive(),
93 "Archive region should not be alloc region (index %u)", retained_region->hrm_index());
94
95 // We will discard the current GC alloc region if:
96 // a) it's in the collection set (it can happen!),
97 // b) it's already full (no point in using it),
98 // c) it's empty (this means that it was emptied during
99 // a cleanup and it should be on the free list now), or
100 // d) it's humongous (this means that it was emptied
// Whether allocation of new old-gen GC alloc regions has failed this GC.
bool G1Allocator::old_is_full() const {
  return _old_is_full;
}
158
// Record that no further survivor GC alloc regions can be obtained this GC.
void G1Allocator::set_survivor_full() {
  _survivor_is_full = true;
}
162
// Record that no further old GC alloc regions can be obtained this GC.
void G1Allocator::set_old_full() {
  _old_is_full = true;
}
166
167 size_t G1Allocator::unsafe_max_tlab_alloc() {
168 // Return the remaining space in the cur alloc region, but not less than
169 // the min TLAB size.
170
171 // Also, this value can be at most the humongous object threshold,
172 // since we can't allow tlabs to grow big enough to accommodate
173 // humongous objects.
174
175 uint node_index = current_node_index();
176 HeapRegion* hr = mutator_alloc_region(node_index)->get();
177 size_t max_tlab = _g1h->max_tlab_size() * wordSize;
178 if (hr == NULL) {
179 return max_tlab;
180 } else {
181 return MIN2(MAX2(hr->free(), (size_t) MinTLABSize), max_tlab);
182 }
183 }
184
185 size_t G1Allocator::used_in_alloc_regions() {
186 assert(Heap_lock->owner() != NULL, "Should be owned on this thread's behalf.");
187 size_t used = 0;
188 for (uint i = 0; i < _num_alloc_regions; i++) {
189 used += mutator_alloc_region(i)->used_in_alloc_regions();
190 }
191 return used;
192 }
193
194
195 HeapWord* G1Allocator::par_allocate_during_gc(G1HeapRegionAttr dest,
196 size_t word_size) {
197 size_t temp = 0;
198 HeapWord* result = par_allocate_during_gc(dest, word_size, word_size, &temp);
199 assert(result == NULL || temp == word_size,
200 "Requested " SIZE_FORMAT " words, but got " SIZE_FORMAT " at " PTR_FORMAT,
201 word_size, temp, p2i(result));
202 return result;
203 }
204
205 HeapWord* G1Allocator::par_allocate_during_gc(G1HeapRegionAttr dest,
206 size_t min_word_size,
207 size_t desired_word_size,
208 size_t* actual_word_size) {
209 switch (dest.type()) {
210 case G1HeapRegionAttr::Young:
211 return survivor_attempt_allocation(min_word_size, desired_word_size, actual_word_size);
|