 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "memory/virtualspace.hpp"
#include "oops/markOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/os.inline.hpp"
#include "services/memTracker.hpp"
#include "utilities/align.hpp"

// ReservedSpace

// Dummy constructor
ReservedSpace::ReservedSpace() : _base(NULL), _size(0), _noaccess_prefix(0),
    _nvdimm_base_nv(NULL), _nvdimm_size(0), _dram_size(0),
    _alignment(0), _special(false), _executable(false), _fd_for_heap(-1), _fd_for_nvdimm(-1) {
}

ReservedSpace::ReservedSpace(size_t size, size_t preferred_page_size) : _fd_for_heap(-1), _fd_for_nvdimm(-1),
    _nvdimm_base_nv(NULL), _nvdimm_size(0), _dram_size(0) {
  bool has_preferred_page_size = preferred_page_size != 0;
  // Want to use large pages where possible and pad with small pages.
  size_t page_size = has_preferred_page_size ? preferred_page_size : os::page_size_for_region_unaligned(size, 1);
  bool large_pages = page_size != (size_t)os::vm_page_size();
  size_t alignment;
  if (large_pages && has_preferred_page_size) {
    alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
    // ReservedSpace initialization requires size to be aligned to the given
    // alignment. Align the size up.
    size = align_up(size, alignment);
  } else {
    // Don't force the alignment to be large page aligned,
    // since that will waste memory.
    alignment = os::vm_allocation_granularity();
  }
  initialize(size, alignment, large_pages, NULL, false);
}

ReservedSpace::ReservedSpace(size_t size, size_t alignment,
                             bool large,
                             char* requested_address) : _fd_for_heap(-1), _fd_for_nvdimm(-1),
    _nvdimm_base_nv(NULL), _nvdimm_size(0), _dram_size(0) {
  initialize(size, alignment, large, requested_address, false);
}

ReservedSpace::ReservedSpace(size_t size, size_t alignment,
                             bool large,
                             bool executable) : _fd_for_heap(-1), _fd_for_nvdimm(-1),
    _nvdimm_base_nv(NULL), _nvdimm_size(0), _dram_size(0) {
  initialize(size, alignment, large, NULL, executable);
}

// Helper method
static void unmap_or_release_memory(char* base, size_t size, bool is_file_mapped) {
  if (is_file_mapped) {
    if (!os::unmap_memory(base, size)) {
      fatal("os::unmap_memory failed");
    }
  } else if (!os::release_memory(base, size)) {
    fatal("os::release_memory failed");
  }
}

// Helper method: returns true if the OS placed the reservation somewhere
// other than the requested address.
static bool failed_to_reserve_as_requested(char* base, char* requested_address,
                                           const size_t size, bool special, bool is_file_mapped = false)
{
  if (base == requested_address || requested_address == NULL)
    return false; // did not fail

  // ... (handling of the misplaced reservation elided) ...
}

void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
                               char* requested_address,
                               bool executable) {
  // ... (argument checks and field resets elided) ...
  _noaccess_prefix = 0;
  if (size == 0) {
    return;
  }

  // If the OS doesn't support demand paging for large page memory, we need
  // to use reserve_memory_special() to reserve and pin the entire region.
  // If there is a backing file directory for this space then whether
  // large pages are allocated is up to the filesystem of the backing file.
  // So we ignore the UseLargePages flag in this case.
  bool special = large && !os::can_commit_large_page_memory();
  if (special && _fd_for_heap != -1) {
    special = false;
    if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
                          !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
      log_debug(gc, heap)("Ignoring UseLargePages since large page support is up to the file system of the backing file for Java heap");
    }
  }

  char* base = NULL;

  if (special) {

    base = os::reserve_memory_special(size, alignment, requested_address, executable);

    if (base != NULL) {
      if (failed_to_reserve_as_requested(base, requested_address, size, true)) {
        // OS ignored the requested address. Try a different address.
        return;
      }
      // Check alignment constraints.
      assert((uintptr_t) base % alignment == 0,
             "Large pages returned a non-aligned address, base: "
             PTR_FORMAT " alignment: " SIZE_FORMAT_HEX,
             p2i(base), alignment);
      _special = true;
    } else {
      // Failed; try to reserve regular memory below.
      if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
                            !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
        log_debug(gc, heap, coops)("Reserve regular memory without large pages");
      }
    }
  }

  if (base == NULL) {
    // Optimistically assume that the OS returns an aligned base pointer.
    // When reserving a large address range, most OSes seem to align to at
    // least 64K.

    // If the memory was requested at a particular address, use
    // os::attempt_reserve_memory_at() to avoid mapping over something
    // important. If the requested space is not available, return NULL.

    if (requested_address != 0) {
      base = os::attempt_reserve_memory_at(size, requested_address, _fd_for_heap);
      if (failed_to_reserve_as_requested(base, requested_address, size, false, _fd_for_heap != -1)) {
        // OS ignored the requested address. Try a different address.
        base = NULL;
      }
    } else {
      if (_nvdimm_base_nv != NULL && _fd_for_nvdimm != -1) {
        // NVDIMM case: reserve only the DRAM part, at the hint computed earlier.
        base = os::reserve_memory(_dram_size, _nvdimm_base_nv, alignment, _fd_for_heap);
      } else {
        base = os::reserve_memory(size, NULL, alignment, _fd_for_heap);
      }
    }

    if (base == NULL) return;

    // Check alignment constraints.
    if ((((size_t)base) & (alignment - 1)) != 0) {
      // Base not aligned, retry.
      unmap_or_release_memory(base, size, _fd_for_heap != -1 /*is_file_mapped*/);

      // Make sure that size is aligned.
      size = align_up(size, alignment);
      base = os::reserve_memory_aligned(size, alignment, _fd_for_heap);

      if (requested_address != 0 &&
          failed_to_reserve_as_requested(base, requested_address, size, false, _fd_for_heap != -1)) {
        // As a result of the alignment constraints, the allocated base differs
        // from the requested address. Return to the caller, who can
        // take remedial action (like trying again without a requested address).
        assert(_base == NULL, "should be");
        return;
      }
    }
  }
  // Done
  _base = base;
  _nvdimm_base = _base - _nvdimm_size;
  _nvdimm_base_nv = NULL;
  _dram_size = size;
  _size = size;
  _alignment = alignment;
  // If the heap is backed by a file, the entire space has been committed,
  // so set the _special flag.
  if (_fd_for_heap != -1) {
    _special = true;
  }
}


ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
                             bool special, bool executable) {
  assert((size % os::vm_allocation_granularity()) == 0,
         "size not allocation aligned");
  _base = base;
  _size = size;
  _nvdimm_base = NULL;
  _nvdimm_base_nv = NULL;
  _dram_size = size;
  _alignment = alignment;
  _noaccess_prefix = 0;
  _special = special;
  _executable = executable;
}

ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment,
                                        bool split, bool realloc) {
  assert(partition_size <= size(), "partition failed");
  if (split) {
    os::split_reserved_memory(base(), size(), partition_size, realloc);
  }
  ReservedSpace result(base(), partition_size, alignment, special(),
                       executable());
  return result;
}


ReservedSpace
ReservedSpace::last_part(size_t partition_size, size_t alignment) {
  assert(partition_size <= size(), "partition failed");
  ReservedSpace result(base() + partition_size, size() - partition_size,
                       alignment, special(), executable());
  return result;
}

size_t ReservedSpace::page_align_size_up(size_t size) {
  return align_up(size, os::vm_page_size());
}


size_t ReservedSpace::page_align_size_down(size_t size) {
  return align_down(size, os::vm_page_size());
}


size_t ReservedSpace::allocation_align_size_up(size_t size) {
  return align_up(size, os::vm_allocation_granularity());
}


size_t ReservedSpace::allocation_align_size_down(size_t size) {
  return align_down(size, os::vm_allocation_granularity());
}


void ReservedSpace::release() {
  if (is_reserved()) {
    char *real_base = _base - _noaccess_prefix;
    const size_t real_size = _size + _noaccess_prefix;
    // Unmap the NVDIMM part.
    if (_fd_for_nvdimm != -1) {
      os::unmap_memory(real_base + real_size, _nvdimm_size);
    }
    if (special()) {
      if (_fd_for_heap != -1) {
        os::unmap_memory(real_base, real_size);
      } else {
        os::release_memory_special(real_base, real_size);
      }
    } else {
      os::release_memory(real_base, real_size);
    }
    _base = NULL;
    _nvdimm_base = NULL;
    _nvdimm_base_nv = NULL;
    _dram_size = 0;
    _nvdimm_size = 0;
    _size = 0;
    _noaccess_prefix = 0;
    _alignment = 0;
    _special = false;
    _executable = false;
  }
}

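// The no-access prefix must cover whole pages and must not disturb the
// alignment of the heap base, so it is sized as the least common multiple
// of the page size and the heap alignment.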
static size_t noaccess_prefix_size(size_t alignment) {
  return lcm(os::vm_page_size(), alignment);
}

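// Protect a no-access page just below the heap base so that decoding a
// narrow null oop with a non-zero heap base yields an address that traps;
// this is what allows implicit null checks with compressed oops.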
void ReservedHeapSpace::establish_noaccess_prefix() {
  assert(_alignment >= (size_t)os::vm_page_size(), "must be at least page size big");
  _noaccess_prefix = noaccess_prefix_size(_alignment);

  if (base() && base() + _size > (char *)OopEncodingHeapMax) {
    if (true
        WIN64_ONLY(&& !UseLargePages)
        AIX_ONLY(&& os::vm_page_size() != 64*K)) {
      // Protect memory at the base of the allocated region.
      // If special, the page was committed (only matters on windows).
      if (!os::protect_memory(_base, _noaccess_prefix, os::MEM_PROT_NONE, _special)) {
        fatal("cannot protect protection page");
      }
      log_debug(gc, heap, coops)("Protected page at the reserved heap base: "
                                 PTR_FORMAT " / " INTX_FORMAT " bytes",
                                 p2i(_base),
                                 _noaccess_prefix);
      assert(Universe::narrow_oop_use_implicit_null_checks() == true, "not initialized?");
    } else {
      Universe::set_narrow_oop_use_implicit_null_checks(false);
    }
  }

  _base += _noaccess_prefix;
  _size -= _noaccess_prefix;
  assert(((uintptr_t)_base % _alignment == 0), "must be exactly of required alignment");
}

// Tries to allocate memory of size 'size' at address requested_address with alignment 'alignment'.
// Does not check whether the reserved memory actually is at requested_address, as the memory returned
// might still fulfill the wishes of the caller.
// Assures the memory is aligned to 'alignment'.
// NOTE: If ReservedHeapSpace already points to some reserved memory, this is freed first.
void ReservedHeapSpace::try_reserve_heap(size_t size,
                                         size_t alignment,
                                         bool large,
                                         char* requested_address) {
  if (_base != NULL) {
    // We tried before, but we didn't like the address delivered.
    release();
  }

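  // When the old gen is placed on NVDIMM, first reserve the entire heap range
  // to find a contiguous hole, then split it via
  // initialize_g1gc_nvdimm_dram_sizes(): the low part will back the NVDIMM
  // old gen, and the address just above it becomes the hint for reserving the
  // DRAM part of the heap.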
  if (_fd_for_nvdimm != -1 && UseG1GC) {
    char* base_nv = os::reserve_memory(size, requested_address, alignment);
    initialize_g1gc_nvdimm_dram_sizes(size, alignment);
    if (base_nv != NULL) {
      _nvdimm_base_nv = base_nv + _nvdimm_size; // hint for the allocation address of the DRAM compressed heap
    }
  }

  // If the OS doesn't support demand paging for large page memory, we need
  // to use reserve_memory_special() to reserve and pin the entire region.
  // If there is a backing file directory for this space then whether
  // large pages are allocated is up to the filesystem of the backing file.
  // So we ignore the UseLargePages flag in this case.
  bool special = large && !os::can_commit_large_page_memory();
  if (special && _fd_for_heap != -1) {
    special = false;
    if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
                          !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
      log_debug(gc, heap)("Cannot allocate large pages for Java Heap when AllocateHeapAt option is set.");
    }
  }
  char* base = NULL;

  log_trace(gc, heap, coops)("Trying to allocate at address " PTR_FORMAT
                             " heap of size " SIZE_FORMAT_HEX,
                             p2i(requested_address),
                             size);

  if (special) {
    base = os::reserve_memory_special(size, alignment, requested_address, false);

    if (base != NULL) {
      // Check alignment constraints.
      assert((uintptr_t) base % alignment == 0,
             "Large pages returned a non-aligned address, base: "
             PTR_FORMAT " alignment: " SIZE_FORMAT_HEX,
             p2i(base), alignment);
      _special = true;
    }
  }

  if (base == NULL) {
    // Failed; try to reserve regular memory below.
    if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
                          !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
      log_debug(gc, heap, coops)("Reserve regular memory without large pages");
    }

    // Optimistically assume that the OS returns an aligned base pointer.
    // When reserving a large address range, most OSes seem to align to at
    // least 64K.

    // If the memory was requested at a particular address, use
    // os::attempt_reserve_memory_at() to avoid mapping over something
    // important. If the requested space is not available, return NULL.

    if (requested_address != 0) {
      if (_nvdimm_base_nv != NULL && _fd_for_nvdimm != -1) {
        // First unmap so that the OS does not keep trying.
        os::unmap_memory(_nvdimm_base_nv, _dram_size);
        base = os::attempt_reserve_memory_at(_dram_size, _nvdimm_base_nv);
      } else {
        base = os::attempt_reserve_memory_at(size, requested_address, _fd_for_heap);
      }
    } else {
      if (_nvdimm_base_nv != NULL && _fd_for_nvdimm != -1) {
        // First unmap so that the OS does not keep trying.
        os::unmap_memory(_nvdimm_base_nv, _dram_size);
        base = os::reserve_memory(_dram_size, _nvdimm_base_nv, alignment);
      } else {
        base = os::reserve_memory(size, NULL, alignment, _fd_for_heap);
      }
    }
  }
  if (base == NULL) { return; }

  // Done
  _base = base;
  _nvdimm_base = _base - _nvdimm_size;
  if (_nvdimm_base_nv != NULL && _fd_for_nvdimm != -1) {
    _size = _dram_size;
  } else {
    _size = size;
  }
  _alignment = alignment;

  // If the heap is backed by a file, the entire space has been committed,
  // so set the _special flag.
  if (_fd_for_heap != -1) {
    _special = true;
  }

  // Check alignment constraints.
  if ((((size_t)base) & (alignment - 1)) != 0) {
    // Base not aligned, retry.
    release();
  }
}

void ReservedHeapSpace::try_reserve_range(char *highest_start,
                                          char *lowest_start,
                                          size_t attach_point_alignment,
                                          char *aligned_heap_base_min_address,
                                          char *upper_bound,
                                          size_t size,
                                          size_t alignment,
                                          bool large) {
  // ... (body elided) ...
}

void ReservedHeapSpace::initialize_compressed_heap(size_t size, size_t alignment, bool large) {
  // ... (unscaled, zero-based and range-based placement attempts elided;
  //      noaccess_prefix is computed there) ...

  if (_base == NULL) {
    // Try attach points that support the disjoint-base mode of compressed oops.
    char** addresses = get_attach_addresses_for_disjoint_mode();
    int i = 0;
    while (addresses[i] &&                                 // End of array not yet reached.
           ((_base == NULL) ||                             // No previous try succeeded.
            (_base + size > (char *)OopEncodingHeapMax &&  // Not zerobased or unscaled address.
             !Universe::is_disjoint_heap_base_address((address)_base)))) {  // Not disjoint address.
      char* const attach_point = addresses[i];
      assert(attach_point >= aligned_heap_base_min_address, "Flag support broken");
      try_reserve_heap(size + noaccess_prefix, alignment, large, attach_point);
      i++;
    }

    // Last, desperate try without any placement.
    if (_base == NULL) {
      log_trace(gc, heap, coops)("Trying to allocate at address NULL heap of size " SIZE_FORMAT_HEX, size + noaccess_prefix);
      initialize(size + noaccess_prefix, alignment, large, NULL, false);
    }
  }
}

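// Split the requested heap size between DRAM and NVDIMM for G1: the DRAM
// part holds the young gen and is bounded by G1MaxNewSizePercent of the
// heap; the remainder goes to NVDIMM for the old gen. Both parts are
// rounded up to a page boundary and then aligned down to the heap alignment.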
void ReservedHeapSpace::initialize_g1gc_nvdimm_dram_sizes(size_t size, size_t alignment) {
  _dram_size = (size_t)((size * G1MaxNewSizePercent) / 100);
  // Round up to a whole page: page_mask is vm_page_size() - 1, so
  // (x + page_mask) & ~page_mask rounds x up to the next page boundary.
  size_t page_mask = os::vm_page_size() - 1;
  _dram_size = (_dram_size + page_mask) & (~page_mask);
  // Align sizes to the heap alignment.
  _dram_size = align_down(_dram_size, alignment);
  _nvdimm_size = size - _dram_size;
  _nvdimm_size = (_nvdimm_size + page_mask) & (~page_mask);
  _nvdimm_size = align_down(_nvdimm_size, alignment);
}
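// Example: with size = 10G, G1MaxNewSizePercent = 60, and a heap alignment
// that divides both parts, this yields _dram_size = 6G for the young gen in
// DRAM and _nvdimm_size = 4G for the old gen on NVDIMM.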

ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, bool large, const char* heap_allocation_directory) : ReservedSpace() {

  if (size == 0) {
    return;
  }

  // If AllocateOldGenAt is set, the old gen is backed by a file on NVDIMM.
  if (AllocateOldGenAt != NULL) {
    _fd_for_nvdimm = os::create_file_for_heap(AllocateOldGenAt);
    if (_fd_for_nvdimm == -1) {
      vm_exit_during_initialization(
        err_msg("Could not create file for Heap at location %s", AllocateOldGenAt));
    }
    if (UseParallelOldGC) {
      // For ParallelOldGC, adaptive sizing resizes the old gen virtual space
      // as needed, which could put a lot of pressure on the NVDIMM mapping,
      // so allocate the full Xmx on NVDIMM up front.
      os::allocate_file(_fd_for_nvdimm, MaxHeapSize);
      os::set_nvdimm_fd(_fd_for_nvdimm);
      os::set_nvdimm_present(true);
    }
  } else {
    _fd_for_nvdimm = -1;
  }

  if (heap_allocation_directory != NULL) {
    _fd_for_heap = os::create_file_for_heap(heap_allocation_directory);
    if (_fd_for_heap == -1) {
      vm_exit_during_initialization(
        err_msg("Could not create file for Heap at location %s", heap_allocation_directory));
    }
  }

  // Heap size should be aligned to alignment, too.
  guarantee(is_aligned(size, alignment), "set by caller");

  char* base_nv = NULL;
  _nvdimm_base_nv = NULL;

  if (_fd_for_nvdimm != -1 && UseG1GC) {
    if (!UseCompressedOops) {
      // With compressed oops the split is done in try_reserve_heap(), which
      // places the DRAM part at the requested address; handle only the
      // uncompressed case here.
      initialize_g1gc_nvdimm_dram_sizes(size, alignment);
      base_nv = os::reserve_memory(size, NULL, alignment);
      if (base_nv != NULL) {
        _nvdimm_base_nv = base_nv + _nvdimm_size; // hint for the allocation address of the DRAM heap
      }
    }
  }

  if (UseCompressedOops) {
    initialize_compressed_heap(size, alignment, large);
    if (_size > size) {
      // We allocated the heap with a noaccess prefix.
      // It can happen we get a zerobased/unscaled heap with a noaccess prefix,
      // if we had to try at an arbitrary address.
      establish_noaccess_prefix();
    }
  } else {
    if (_fd_for_nvdimm != -1 && UseG1GC) {
      initialize(_dram_size, alignment, large, NULL, false);
    } else {
      initialize(size, alignment, large, NULL, false);
    }
  }

  assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
         "area must be distinguishable from marks for mark-sweep");
  assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
         "area must be distinguishable from marks for mark-sweep");

  if (base() != NULL) {
    MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
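    // Publish the NVDIMM mapping to the os layer and extend this space to
    // cover both parts: the old gen on NVDIMM sits at [_nvdimm_base,
    // _nvdimm_base + _nvdimm_size) with the DRAM heap directly above it.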
    if (_fd_for_nvdimm != -1 && UseG1GC) {
      os::set_nvdimm_present(true);
      os::set_dram_heapbase((address)_base);
      os::set_nvdimm_heapbase((address)_nvdimm_base);
      os::set_nvdimm_fd(_fd_for_nvdimm);
      _size += _nvdimm_size;
      _base = _nvdimm_base;
      log_info(gc, heap)("Java DRAM Heap at [" PTR_FORMAT " - " PTR_FORMAT "] & NVDIMM Old Gen at [" PTR_FORMAT " - " PTR_FORMAT "] " SIZE_FORMAT,
                         p2i(_nvdimm_base + _nvdimm_size), p2i(_nvdimm_base + _nvdimm_size + _dram_size),
                         p2i(_nvdimm_base), p2i(_nvdimm_base + _nvdimm_size), size);
    }
  }

  if (_fd_for_heap != -1) {
    os::close(_fd_for_heap);
  }
}

// Reserve space for the code segment. Same as the Java heap, only we mark
// this as executable.
ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
                                     size_t rs_align,
                                     bool large) :
  ReservedSpace(r_size, rs_align, large, /*executable*/ true) {
  MemTracker::record_virtual_memory_type((address)base(), mtCode);
}

// VirtualSpace

VirtualSpace::VirtualSpace() {
  _low_boundary = NULL;