 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/g1Arguments.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1ConcurrentRefine.hpp"
#include "gc/g1/heapRegion.hpp"
#include "gc/g1/heapRegionManager.inline.hpp"
#include "gc/g1/heapRegionSet.inline.hpp"
#include "gc/g1/heterogeneousHeapRegionManager.hpp"
#include "logging/logStream.hpp"
#include "memory/allocation.hpp"
#include "utilities/bitMap.inline.hpp"

class MasterFreeRegionListChecker : public HeapRegionSetChecker {
public:
  void check_mt_safety() {
    // Master Free List MT safety protocol:
    // (a) If we're at a safepoint, operations on the master free list
    // should be invoked by either the VM thread (which will serialize
    // them) or by the GC workers while holding the
    // FreeList_lock.
    // (b) If we're not at a safepoint, operations on the master free
    // list should be invoked while holding the Heap_lock.

    if (SafepointSynchronize::is_at_safepoint()) {
      guarantee(Thread::current()->is_VM_thread() ||
                FreeList_lock->owned_by_self(), "master free list MT safety protocol at a safepoint");
    } else {
      guarantee(Heap_lock->owned_by_self(), "master free list MT safety protocol outside a safepoint");
    }
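    // Illustrative sketch (not part of the original file): a caller outside a
    // safepoint satisfies protocol (b) above by holding the Heap_lock around
    // its free-list operation, e.g.
    //
    //   MutexLocker ml(Heap_lock);                       // hypothetical caller
    //   HeapRegion* hr = hrm->allocate_free_region(type, node_index);
    //
    // 'hrm', 'type' and 'node_index' are placeholders; this checker only
    // verifies that whichever caller reaches it holds the appropriate lock.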
// ... (elided) ...

  _heap_mapper = heap_storage;

  _prev_bitmap_mapper = prev_bitmap;
  _next_bitmap_mapper = next_bitmap;

  _bot_mapper = bot;
  _cardtable_mapper = cardtable;

  _card_counts_mapper = card_counts;

  MemRegion reserved = heap_storage->reserved();
  _regions.initialize(reserved.start(), reserved.end(), HeapRegion::GrainBytes);

  _available_map.initialize(_regions.length());
}

bool HeapRegionManager::is_available(uint region) const {
  return _available_map.at(region);
}

// If logging is enabled, compare the given node index with the actual node
// index of the address; if they differ, log the mismatch and return the
// actual node index. If logging is disabled, just return the given node index.
static uint verify_actual_node_index(HeapWord* addr, uint node_index) {
  LogTarget(Debug, gc, heap, numa, verification) lt;

  if (lt.is_enabled()) {
    LogStream ls(lt);

    uint actual_node_index = G1MemoryNodeManager::mgr()->index_of_address(addr);
    if (node_index != actual_node_index) {
      ls.print_cr("Heap Region (%u) has different node index. actual index=%u, index=%u",
                  G1CollectedHeap::heap()->addr_to_region(addr), actual_node_index, node_index);
    }
    return actual_node_index;
  }
  return node_index;
}
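// Note (not in the original source): the LogTarget above corresponds to the
// unified-logging tag set gc+heap+numa+verification at debug level, so the
// mismatch message can be observed with a flag along the lines of
// -Xlog:gc+heap+numa+verification=debug.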

HeapRegion* HeapRegionManager::allocate_free_region(HeapRegionType type, uint requested_node_index) {
  G1MemoryNodeManager* mgr = G1MemoryNodeManager::mgr();
  HeapRegion* hr = NULL;
  bool from_head = !type.is_young();

  if (mgr->num_active_nodes() > 1 && mgr->is_valid_node_index(requested_node_index)) {
    // Try to allocate with requested node index.
    hr = _free_list.remove_region_with_node_index(from_head, requested_node_index, NULL);
  }

  if (hr == NULL) {
    // If there's a single active node or we did not get a region from our requested node,
    // try without requested node index.
    hr = _free_list.remove_region(from_head);
  }

  if (hr != NULL) {
    assert(hr->next() == NULL, "Single region should not have next");
    assert(is_available(hr->hrm_index()), "Must be committed");

    verify_actual_node_index(hr->bottom(), hr->node_index());
  }

  return hr;
}
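// Usage sketch (illustrative, not part of the original file): a NUMA-aware
// allocation path would pass the node it prefers and rely on the fallback
// above when that node has no free region, e.g.
//
//   // 'hrm', 'type' and 'node' are placeholders for illustration.
//   HeapRegion* hr = hrm->allocate_free_region(type, node);
//   if (hr == NULL) {
//     // No free region on any node; the caller would have to expand the heap.
//   }
//
// Callers do not need their own per-node retry loop, since the fallback to an
// arbitrary node already happens inside allocate_free_region().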

#ifdef ASSERT
bool HeapRegionManager::is_free(HeapRegion* hr) const {
  return _free_list.contains(hr);
}
#endif

HeapRegion* HeapRegionManager::new_heap_region(uint hrm_index) {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  HeapWord* bottom = g1h->bottom_addr_for_region(hrm_index);
  MemRegion mr(bottom, bottom + HeapRegion::GrainWords);
  assert(reserved().contains(mr), "invariant");
  return g1h->new_heap_region(hrm_index, mr);
}

void HeapRegionManager::commit_regions(uint index, size_t num_regions, WorkGang* pretouch_gang) {
  guarantee(num_regions > 0, "Must commit more than zero regions");
  guarantee(_num_committed + num_regions <= max_length(), "Cannot commit more than the maximum amount of regions");

  _num_committed += (uint)num_regions;

  _heap_mapper->commit_regions(index, num_regions, pretouch_gang);

  // Also commit auxiliary data
  _prev_bitmap_mapper->commit_regions(index, num_regions, pretouch_gang);
  _next_bitmap_mapper->commit_regions(index, num_regions, pretouch_gang);

  _bot_mapper->commit_regions(index, num_regions, pretouch_gang);
  _cardtable_mapper->commit_regions(index, num_regions, pretouch_gang);

  _card_counts_mapper->commit_regions(index, num_regions, pretouch_gang);
}
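// Worked example (informal, added for clarity): commit_regions(5, 3, workers)
// bumps _num_committed by 3 and commits regions 5, 6 and 7 in the heap mapper
// as well as in every auxiliary mapper (bitmaps, BOT, card table, card counts),
// so the auxiliary structures stay committed for exactly the committed heap range.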

void HeapRegionManager::uncommit_regions(uint start, size_t num_regions) {
  guarantee(num_regions >= 1, "Need to specify at least one region to uncommit, tried to uncommit zero regions at %u", start);
  guarantee(_num_committed >= num_regions, "pre-condition");

  // Reset the node index so uncommitted regions can be distinguished from committed ones.
  for (uint i = start; i < start + num_regions; i++) {
    at(i)->set_node_index(G1MemoryNodeManager::InvalidNodeIndex);
  }

  // Print before uncommitting.
  if (G1CollectedHeap::heap()->hr_printer()->is_active()) {
    for (uint i = start; i < start + num_regions; i++) {
      HeapRegion* hr = at(i);
      G1CollectedHeap::heap()->hr_printer()->uncommit(hr);
    }
  }

  _num_committed -= (uint)num_regions;

  _available_map.par_clear_range(start, start + num_regions, BitMap::unknown_range);
  _heap_mapper->uncommit_regions(start, num_regions);

  // Also uncommit auxiliary data
  _prev_bitmap_mapper->uncommit_regions(start, num_regions);
  _next_bitmap_mapper->uncommit_regions(start, num_regions);

  _bot_mapper->uncommit_regions(start, num_regions);
  _cardtable_mapper->uncommit_regions(start, num_regions);

  _card_counts_mapper->uncommit_regions(start, num_regions);
}

static void print_node_id_of_regions(uint start, uint num_regions) {
  LogTarget(Trace, gc, heap, numa) lt;

  if (lt.is_enabled()) {
    LogStream ls(lt);

    // Below logs are checked by TestG1NUMATouchRegions.java.
    ls.print_cr("Numa id of heap regions from %u to %u", start, start + num_regions - 1);
    ls.print_cr("Heap Region# : numa id of pages");

    for (uint i = start; i < start + num_regions; i++) {
      ls.print_cr("%6u : %02u", i, G1CollectedHeap::heap()->region_at(i)->node_index());
    }
  }
}
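// For reference (added for clarity), the trace output produced above looks
// roughly like this; the region indices and node ids are machine-specific:
//
//   Numa id of heap regions from 0 to 3
//   Heap Region# : numa id of pages
//        0 : 00
//        1 : 00
//        2 : 01
//        3 : 01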

// Set the node index of the given HeapRegion.
// If AlwaysPreTouch is enabled, use the actual node index of the region's memory;
// otherwise use the preferred node index, which has already been decided.
static void set_heapregion_node_index(HeapRegion* hr) {
  uint node_index = G1MemoryNodeManager::mgr()->preferred_index_for_address(hr->bottom());
  if (AlwaysPreTouch) {
    // If we already pretouched, we can check the actual node index here.
    node_index = verify_actual_node_index(hr->bottom(), node_index);
  }
  hr->set_node_index(node_index);
}

void HeapRegionManager::make_regions_available(uint start, uint num_regions, WorkGang* pretouch_gang) {
  guarantee(num_regions > 0, "No point in calling this for zero regions");
  commit_regions(start, num_regions, pretouch_gang);
  for (uint i = start; i < start + num_regions; i++) {
    if (_regions.get_by_index(i) == NULL) {
      HeapRegion* new_hr = new_heap_region(i);
      OrderAccess::storestore();
      _regions.set_by_index(i, new_hr);
      _allocated_heapregions_length = MAX2(_allocated_heapregions_length, i + 1);
    }
  }

  _available_map.par_set_range(start, start + num_regions, BitMap::unknown_range);

  for (uint i = start; i < start + num_regions; i++) {
    assert(is_available(i), "Just made region %u available but is apparently not.", i);
    HeapRegion* hr = at(i);
    if (G1CollectedHeap::heap()->hr_printer()->is_active()) {
      G1CollectedHeap::heap()->hr_printer()->commit(hr);
    }
    HeapWord* bottom = G1CollectedHeap::heap()->bottom_addr_for_region(i);
    MemRegion mr(bottom, bottom + HeapRegion::GrainWords);

    hr->initialize(mr);
    // Set the node index of the heap region after initialization but before
    // inserting it into the free list.
    set_heapregion_node_index(hr);
    insert_into_free_list(at(i));
  }

  print_node_id_of_regions(start, num_regions);
}

MemoryUsage HeapRegionManager::get_auxiliary_data_memory_usage() const {
  size_t used_sz =
    _prev_bitmap_mapper->committed_size() +
    _next_bitmap_mapper->committed_size() +
    _bot_mapper->committed_size() +
    _cardtable_mapper->committed_size() +
    _card_counts_mapper->committed_size();

  size_t committed_sz =
    _prev_bitmap_mapper->reserved_size() +
    _next_bitmap_mapper->reserved_size() +
    _bot_mapper->reserved_size() +
    _cardtable_mapper->reserved_size() +
    _card_counts_mapper->reserved_size();

  return MemoryUsage(0, used_sz, committed_sz, committed_sz);
}
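// Note (added for clarity): MemoryUsage is (init, used, committed, max), so the
// auxiliary data reports its committed bytes as "used" and its reserved bytes
// as both "committed" and "max", with an initial size of 0.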

// ... (elided) ...

    return 0;
  }

  uint cur = start;
  uint idx_last_found = 0;
  uint num_last_found = 0;

  uint expanded = 0;

  while (expanded < num_regions &&
         (num_last_found = find_unavailable_from_idx(cur, &idx_last_found)) > 0) {
    uint to_expand = MIN2(num_regions - expanded, num_last_found);
    make_regions_available(idx_last_found, to_expand, pretouch_workers);
    expanded += to_expand;
    cur = idx_last_found + num_last_found + 1;
  }

  verify_optional();
  return expanded;
}
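// Informal example of the expansion loop above (added for clarity): with
// num_regions = 4, suppose find_unavailable_from_idx() first reports a run of
// 2 uncommitted regions at index 10 and later a run of 3 at index 20. The
// first pass makes regions 10-11 available (expanded = 2); the second pass
// only needs MIN2(4 - 2, 3) = 2 more, so regions 20-21 are made available and
// the loop stops with expanded = 4.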

uint HeapRegionManager::expand_on_preferred_node(uint preferred_index) {
  uint expand_candidate = UINT_MAX;
  for (uint i = 0; i < max_length(); i++) {
    if (is_available(i)) {
      // Already in use, continue.
      continue;
    }
    // Always save the candidate so we can expand later on.
    expand_candidate = i;
    if (is_on_preferred_index(expand_candidate, preferred_index)) {
      // We have found a candidate on the preferred node, break.
      break;
    }
  }

  if (expand_candidate == UINT_MAX) {
    // No regions left, expand failed.
    return 0;
  }

  make_regions_available(expand_candidate, 1, NULL);
  return 1;
}
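// Behavioral note (added for clarity): the loop keeps overwriting
// 'expand_candidate', so if no uncommitted region lies on the preferred node
// the function still expands by one region; it just ends up being the last
// uncommitted region scanned rather than one on the requested node.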

bool HeapRegionManager::is_on_preferred_index(uint region_index, uint preferred_node_index) {
  uint region_node_index = G1MemoryNodeManager::mgr()->preferred_index_for_address(
    G1CollectedHeap::heap()->bottom_addr_for_region(region_index));
  return region_node_index == preferred_node_index ||
         preferred_node_index == G1MemoryNodeManager::AnyNodeIndex;
}

uint HeapRegionManager::find_contiguous(size_t num, bool empty_only) {
  uint found = 0;
  size_t length_found = 0;
  uint cur = 0;

  while (length_found < num && cur < max_length()) {
    HeapRegion* hr = _regions.get_by_index(cur);
    if ((!empty_only && !is_available(cur)) || (is_available(cur) && hr != NULL && hr->is_empty())) {
      // This region is a potential candidate for allocation into.
      length_found++;
    } else {
      // This region is not a candidate. The next region is the next possible one.
      found = cur + 1;
      length_found = 0;
    }
    cur++;
  }

  if (length_found == num) {