18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "code/codeCacheExtensions.hpp"
27 #include "logging/log.hpp"
28 #include "memory/resourceArea.hpp"
29 #include "memory/virtualspace.hpp"
30 #include "oops/markOop.hpp"
31 #include "oops/oop.inline.hpp"
32 #include "services/memTracker.hpp"
33
34 // ReservedSpace
35
36 // Dummy constructor
37 ReservedSpace::ReservedSpace() : _base(NULL), _size(0), _noaccess_prefix(0),
38 _alignment(0), _special(false), _executable(false) {
39 }
40
41 ReservedSpace::ReservedSpace(size_t size, size_t preferred_page_size) {
42 bool has_preferred_page_size = preferred_page_size != 0;
43 // Want to use large pages where possible and pad with small pages.
44 size_t page_size = has_preferred_page_size ? preferred_page_size : os::page_size_for_region_unaligned(size, 1);
45 bool large_pages = page_size != (size_t)os::vm_page_size();
46 size_t alignment;
47 if (large_pages && has_preferred_page_size) {
48 alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
49 // ReservedSpace initialization requires size to be aligned to the given
50 // alignment. Align the size up.
51 size = align_size_up(size, alignment);
52 } else {
53 // Don't force the alignment to be large page aligned,
54 // since that will waste memory.
55 alignment = os::vm_allocation_granularity();
56 }
57 initialize(size, alignment, large_pages, NULL, false);
58 }
59
// Reserve 'size' bytes, preferably at 'requested_address'.  The mapping is
// never executable.  Delegates all work to initialize().
ReservedSpace::ReservedSpace(size_t size, size_t alignment,
                             bool large,
                             char* requested_address) {
  initialize(size, alignment, large, requested_address, false);
}
65
// Reserve 'size' bytes anywhere in the address space, optionally executable
// (used for code).  Delegates all work to initialize().
ReservedSpace::ReservedSpace(size_t size, size_t alignment,
                             bool large,
                             bool executable) {
  initialize(size, alignment, large, NULL, executable);
}
71
72 // Helper method.
73 static bool failed_to_reserve_as_requested(char* base, char* requested_address,
74 const size_t size, bool special)
75 {
76 if (base == requested_address || requested_address == NULL)
77 return false; // did not fail
78
79 if (base != NULL) {
80 // Different reserve address may be acceptable in other cases
81 // but for compressed oops heap should be at requested address.
82 assert(UseCompressedOops, "currently requested address used only for compressed oops");
83 log_debug(gc, heap, coops)("Reserved memory not at requested address: " PTR_FORMAT " vs " PTR_FORMAT, p2i(base), p2i(requested_address));
84 // OS ignored requested address. Try different address.
85 if (special) {
86 if (!os::release_memory_special(base, size)) {
87 fatal("os::release_memory_special failed");
88 }
89 } else {
90 if (!os::release_memory(base, size)) {
91 fatal("os::release_memory failed");
92 }
93 }
94 }
95 return true;
96 }
97
// Reserve 'size' bytes of virtual address space with the given 'alignment'.
// On success _base/_size/_alignment are set; on failure the space is left
// empty (_base == NULL) so is_reserved() reports false.
//
// 'large'             - request large pages; when the OS cannot commit
//                       large-page memory on demand the region is reserved
//                       and pinned via os::reserve_memory_special().
// 'requested_address' - preferred placement, or NULL for "anywhere".
// 'executable'        - whether the mapping must permit code execution.
void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
                               char* requested_address,
                               bool executable) {
  const size_t granularity = os::vm_allocation_granularity();
  assert((size & (granularity - 1)) == 0,
         "size not aligned to os::vm_allocation_granularity()");
  assert((alignment & (granularity - 1)) == 0,
         "alignment not aligned to os::vm_allocation_granularity()");
  assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
         "not a power of 2");

  // Alignment can never be smaller than a page.
  alignment = MAX2(alignment, (size_t)os::vm_page_size());

  // Reset to the empty state; the fields are filled in only on success.
  _base = NULL;
  _size = 0;
  _special = false;
  _executable = executable;
  _alignment = 0;
  _noaccess_prefix = 0;
  if (size == 0) {
    return;
  }

  // If OS doesn't support demand paging for large page memory, we need
  // to use reserve_memory_special() to reserve and pin the entire region.
  bool special = large && !os::can_commit_large_page_memory();
  char* base = NULL;

  if (special) {

    base = os::reserve_memory_special(size, alignment, requested_address, executable);

    if (base != NULL) {
      if (failed_to_reserve_as_requested(base, requested_address, size, true)) {
        // OS ignored requested address. Try different address.
        return;
      }
      // Check alignment constraints.
      assert((uintptr_t) base % alignment == 0,
             "Large pages returned a non-aligned address, base: "
             PTR_FORMAT " alignment: " SIZE_FORMAT_HEX,
             p2i(base), alignment);
      _special = true;
    } else {
      // failed; try to reserve regular memory below
      if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
                            !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
        log_debug(gc, heap, coops)("Reserve regular memory without large pages");
      }
    }
  }

  if (base == NULL) {
    // Optimistically assume that the OSes returns an aligned base pointer.
    // When reserving a large address range, most OSes seem to align to at
    // least 64K.

    // If the memory was requested at a particular address, use
    // os::attempt_reserve_memory_at() to avoid over mapping something
    // important. If available space is not detected, return NULL.

    if (requested_address != 0) {
      base = os::attempt_reserve_memory_at(size, requested_address);
      if (failed_to_reserve_as_requested(base, requested_address, size, false)) {
        // OS ignored requested address. Try different address.
        base = NULL;
      }
    } else {
      base = os::reserve_memory(size, NULL, alignment);
    }

    if (base == NULL) return;

    // Check alignment constraints
    if ((((size_t)base) & (alignment - 1)) != 0) {
      // Base not aligned, retry
      if (!os::release_memory(base, size)) fatal("os::release_memory failed");
      // Make sure that size is aligned
      size = align_size_up(size, alignment);
      // reserve_memory_aligned() over-reserves and trims, so the result is
      // guaranteed to satisfy 'alignment'.
      base = os::reserve_memory_aligned(size, alignment);

      if (requested_address != 0 &&
          failed_to_reserve_as_requested(base, requested_address, size, false)) {
        // As a result of the alignment constraints, the allocated base differs
        // from the requested address. Return back to the caller who can
        // take remedial action (like try again without a requested address).
        assert(_base == NULL, "should be");
        return;
      }
    }
  }
  // Done
  _base = base;
  _size = size;
  _alignment = alignment;
}
194
195
// Wrap an already-reserved range [base, base+size) in a ReservedSpace.
// The caller vouches that the range was reserved with the given alignment,
// 'special' (pinned large pages) and 'executable' properties.
ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
                             bool special, bool executable) {
  assert((size % os::vm_allocation_granularity()) == 0,
         "size not allocation aligned");
  _base = base;
  _size = size;
  _alignment = alignment;
  _noaccess_prefix = 0;
  _special = special;
  _executable = executable;
}
207
208
209 ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment,
210 bool split, bool realloc) {
211 assert(partition_size <= size(), "partition failed");
212 if (split) {
// Round 'size' down to a multiple of the OS page size.
size_t ReservedSpace::page_align_size_down(size_t size) {
  return align_size_down(size, os::vm_page_size());
}
238
239
// Round 'size' up to a multiple of the OS allocation granularity
// (e.g. 64K on Windows, page size elsewhere).
size_t ReservedSpace::allocation_align_size_up(size_t size) {
  return align_size_up(size, os::vm_allocation_granularity());
}
243
244
// Round 'size' down to a multiple of the OS allocation granularity.
size_t ReservedSpace::allocation_align_size_down(size_t size) {
  return align_size_down(size, os::vm_allocation_granularity());
}
248
249
// Return the reserved range (including any noaccess prefix) to the OS and
// reset this object to the empty state.  No-op if nothing is reserved.
void ReservedSpace::release() {
  if (is_reserved()) {
    // The protected noaccess prefix precedes _base and must be freed with it.
    char *real_base = _base - _noaccess_prefix;
    const size_t real_size = _size + _noaccess_prefix;
    if (special()) {
      // Pinned large-page memory was obtained via reserve_memory_special().
      os::release_memory_special(real_base, real_size);
    } else{
      os::release_memory(real_base, real_size);
    }
    _base = NULL;
    _size = 0;
    _noaccess_prefix = 0;
    _alignment = 0;
    _special = false;
    _executable = false;
  }
}
267
// Size of the protected (noaccess) prefix placed before a compressed-oops
// heap: the smallest value that is a multiple of both the page size and the
// heap alignment.
static size_t noaccess_prefix_size(size_t alignment) {
  return lcm(os::vm_page_size(), alignment);
}
271
272 void ReservedHeapSpace::establish_noaccess_prefix() {
273 assert(_alignment >= (size_t)os::vm_page_size(), "must be at least page size big");
274 _noaccess_prefix = noaccess_prefix_size(_alignment);
275
296 _size -= _noaccess_prefix;
297 assert(((uintptr_t)_base % _alignment == 0), "must be exactly of required alignment");
298 }
299
// Tries to allocate memory of size 'size' at address requested_address with alignment 'alignment'.
// Does not check whether the reserved memory actually is at requested_address, as the memory returned
// might still fulfill the wishes of the caller.
// Assures the memory is aligned to 'alignment'.
// Success is observable via _base != NULL afterwards.
// NOTE: If ReservedHeapSpace already points to some reserved memory this is freed, first.
void ReservedHeapSpace::try_reserve_heap(size_t size,
                                         size_t alignment,
                                         bool large,
                                         char* requested_address) {
  if (_base != NULL) {
    // We tried before, but we didn't like the address delivered.
    release();
  }

  // If OS doesn't support demand paging for large page memory, we need
  // to use reserve_memory_special() to reserve and pin the entire region.
  bool special = large && !os::can_commit_large_page_memory();
  char* base = NULL;

  log_trace(gc, heap, coops)("Trying to allocate at address " PTR_FORMAT
                             " heap of size " SIZE_FORMAT_HEX,
                             p2i(requested_address),
                             size);

  if (special) {
    // Heap reservation is never executable, hence 'false'.
    base = os::reserve_memory_special(size, alignment, requested_address, false);

    if (base != NULL) {
      // Check alignment constraints.
      assert((uintptr_t) base % alignment == 0,
             "Large pages returned a non-aligned address, base: "
             PTR_FORMAT " alignment: " SIZE_FORMAT_HEX,
             p2i(base), alignment);
      _special = true;
    }
  }

  if (base == NULL) {
    // Failed; try to reserve regular memory below
    if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
                          !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
      log_debug(gc, heap, coops)("Reserve regular memory without large pages");
    }

    // Optimistically assume that the OSes returns an aligned base pointer.
    // When reserving a large address range, most OSes seem to align to at
    // least 64K.

    // If the memory was requested at a particular address, use
    // os::attempt_reserve_memory_at() to avoid over mapping something
    // important. If available space is not detected, return NULL.

    if (requested_address != 0) {
      base = os::attempt_reserve_memory_at(size, requested_address);
    } else {
      base = os::reserve_memory(size, NULL, alignment);
    }
  }
  if (base == NULL) { return; }

  // Done
  _base = base;
  _size = size;
  _alignment = alignment;

  // Check alignment constraints
  if ((((size_t)base) & (alignment - 1)) != 0) {
    // Base not aligned, retry.  release() resets _base to NULL, which the
    // caller interprets as "this attempt failed".
    release();
  }
}
371
372 void ReservedHeapSpace::try_reserve_range(char *highest_start,
373 char *lowest_start,
374 size_t attach_point_alignment,
375 char *aligned_heap_base_min_address,
376 char *upper_bound,
377 size_t size,
378 size_t alignment,
379 bool large) {
380 const size_t attach_range = highest_start - lowest_start;
381 // Cap num_attempts at possible number.
382 // At least one is possible even for 0 sized attach range.
383 const uint64_t num_attempts_possible = (attach_range / attach_point_alignment) + 1;
539 char** addresses = get_attach_addresses_for_disjoint_mode();
540 int i = 0;
541 while (addresses[i] && // End of array not yet reached.
542 ((_base == NULL) || // No previous try succeeded.
543 (_base + size > (char *)OopEncodingHeapMax && // Not zerobased or unscaled address.
544 !Universe::is_disjoint_heap_base_address((address)_base)))) { // Not disjoint address.
545 char* const attach_point = addresses[i];
546 assert(attach_point >= aligned_heap_base_min_address, "Flag support broken");
547 try_reserve_heap(size + noaccess_prefix, alignment, large, attach_point);
548 i++;
549 }
550
551 // Last, desperate try without any placement.
552 if (_base == NULL) {
553 log_trace(gc, heap, coops)("Trying to allocate at address NULL heap of size " SIZE_FORMAT_HEX, size + noaccess_prefix);
554 initialize(size + noaccess_prefix, alignment, large, NULL, false);
555 }
556 }
557 }
558
559 ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, bool large) : ReservedSpace() {
560
561 if (size == 0) {
562 return;
563 }
564
565 // Heap size should be aligned to alignment, too.
566 guarantee(is_size_aligned(size, alignment), "set by caller");
567
568 if (UseCompressedOops) {
569 initialize_compressed_heap(size, alignment, large);
570 if (_size > size) {
571 // We allocated heap with noaccess prefix.
572 // It can happen we get a zerobased/unscaled heap with noaccess prefix,
573 // if we had to try at arbitrary address.
574 establish_noaccess_prefix();
575 }
576 } else {
577 initialize(size, alignment, large, NULL, false);
578 }
579
580 assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
581 "area must be distinguishable from marks for mark-sweep");
582 assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
583 "area must be distinguishable from marks for mark-sweep");
584
585 if (base() > 0) {
586 MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
587 }
588 }
589
// Reserve space for code segment.  Same as Java heap only we mark this as
// executable (when dynamic code generation is supported).
ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
                                     size_t rs_align,
                                     bool large) :
  ReservedSpace(r_size, rs_align, large, /*executable*/ CodeCacheExtensions::support_dynamic_code()) {
  // Register the mapping with NMT as code-cache memory.
  MemTracker::record_virtual_memory_type((address)base(), mtCode);
}
598
599 // VirtualSpace
600
601 VirtualSpace::VirtualSpace() {
602 _low_boundary = NULL;
603 _high_boundary = NULL;
604 _low = NULL;
605 _high = NULL;
606 _lower_high = NULL;
|
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "code/codeCacheExtensions.hpp"
27 #include "logging/log.hpp"
28 #include "memory/resourceArea.hpp"
29 #include "memory/virtualspace.hpp"
30 #include "oops/markOop.hpp"
31 #include "oops/oop.inline.hpp"
32 #include "services/memTracker.hpp"
33
34 // ReservedSpace
35
// Dummy constructor: produces an empty, unreserved space.  All fields are
// zeroed and _backing_fd is set to -1 ("no heap backing file").
ReservedSpace::ReservedSpace() : _base(NULL), _size(0), _noaccess_prefix(0),
    _alignment(0), _special(false), _executable(false), _backing_fd(-1) {
}
40
41 ReservedSpace::ReservedSpace(size_t size, size_t preferred_page_size) : _backing_fd(-1) {
42 bool has_preferred_page_size = preferred_page_size != 0;
43 // Want to use large pages where possible and pad with small pages.
44 size_t page_size = has_preferred_page_size ? preferred_page_size : os::page_size_for_region_unaligned(size, 1);
45 bool large_pages = page_size != (size_t)os::vm_page_size();
46 size_t alignment;
47 if (large_pages && has_preferred_page_size) {
48 alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
49 // ReservedSpace initialization requires size to be aligned to the given
50 // alignment. Align the size up.
51 size = align_size_up(size, alignment);
52 } else {
53 // Don't force the alignment to be large page aligned,
54 // since that will waste memory.
55 alignment = os::vm_allocation_granularity();
56 }
57 initialize(size, alignment, large_pages, NULL, false);
58 }
59
// Reserve 'size' bytes, preferably at 'requested_address'.  Never executable,
// no heap backing file.  Delegates all work to initialize().
ReservedSpace::ReservedSpace(size_t size, size_t alignment,
                             bool large,
                             char* requested_address) : _backing_fd(-1) {
  initialize(size, alignment, large, requested_address, false);
}
65
// Reserve 'size' bytes anywhere, optionally executable (used for code).
// No heap backing file.  Delegates all work to initialize().
ReservedSpace::ReservedSpace(size_t size, size_t alignment,
                             bool large,
                             bool executable) : _backing_fd(-1) {
  initialize(size, alignment, large, NULL, executable);
}
71
72 // Helper method.
73 static bool failed_to_reserve_as_requested(char* base, char* requested_address,
74 const size_t size, bool special, bool is_file_mapped= false)
75 {
76 if (base == requested_address || requested_address == NULL)
77 return false; // did not fail
78
79 if (base != NULL) {
80 // Different reserve address may be acceptable in other cases
81 // but for compressed oops heap should be at requested address.
82 assert(UseCompressedOops, "currently requested address used only for compressed oops");
83 log_debug(gc, heap, coops)("Reserved memory not at requested address: " PTR_FORMAT " vs " PTR_FORMAT, p2i(base), p2i(requested_address));
84 // OS ignored requested address. Try different address.
85 if (special) {
86 if (!os::release_memory_special(base, size)) {
87 fatal("os::release_memory_special failed");
88 }
89 } else {
90 if (is_file_mapped) {
91 if (!os::unmap_memory(base, size)) {
92 fatal("os::release_memory failed");
93 }
94 } else {
95 if (!os::release_memory(base, size)) {
96 fatal("os::release_memory failed");
97 }
98 }
99 }
100 }
101 return true;
102 }
103
104 void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
105 char* requested_address,
106 bool executable) {
107 const size_t granularity = os::vm_allocation_granularity();
108 assert((size & (granularity - 1)) == 0,
109 "size not aligned to os::vm_allocation_granularity()");
110 assert((alignment & (granularity - 1)) == 0,
111 "alignment not aligned to os::vm_allocation_granularity()");
112 assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
113 "not a power of 2");
114
115 alignment = MAX2(alignment, (size_t)os::vm_page_size());
116
117 _base = NULL;
118 _size = 0;
119 _special = false;
120 _executable = executable;
121 _alignment = 0;
122 _noaccess_prefix = 0;
123 if (size == 0) {
124 return;
125 }
126
127 // If OS doesn't support demand paging for large page memory, we need
128 // to use reserve_memory_special() to reserve and pin the entire region.
129 // If there is a backing file directory for this VirtualSpace then whether
130 // large pages are allocated is upto the filesystem the dir resides in.
131 // So we ignore the UseLargePages flag in this case.
132 bool special = large && !os::can_commit_large_page_memory();
133 if (special && _backing_fd != -1) {
134 special = false;
135 if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
136 !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
137 log_debug(gc, heap, coops)("UseLargePages can't be set with HeapDir option.");
138 }
139 }
140 char* base = NULL;
141
142 if (special) {
143
144 base = os::reserve_memory_special(size, alignment, requested_address, executable);
145
146 if (base != NULL) {
147 if (failed_to_reserve_as_requested(base, requested_address, size, true)) {
148 // OS ignored requested address. Try different address.
149 return;
150 }
151 // Check alignment constraints.
152 assert((uintptr_t) base % alignment == 0,
153 "Large pages returned a non-aligned address, base: "
154 PTR_FORMAT " alignment: " SIZE_FORMAT_HEX,
155 p2i(base), alignment);
156 _special = true;
157 } else {
158 // failed; try to reserve regular memory below
159 if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
160 !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
161 log_debug(gc, heap, coops)("Reserve regular memory without large pages");
162 }
163 }
164 }
165
166 if (base == NULL) {
167 // Optimistically assume that the OSes returns an aligned base pointer.
168 // When reserving a large address range, most OSes seem to align to at
169 // least 64K.
170
171 // If the memory was requested at a particular address, use
172 // os::attempt_reserve_memory_at() to avoid over mapping something
173 // important. If available space is not detected, return NULL.
174
175 if (requested_address != 0) {
176 base = os::attempt_reserve_memory_at(size, requested_address, _backing_fd);
177 if (failed_to_reserve_as_requested(base, requested_address, size, false, _backing_fd != -1)) {
178 // OS ignored requested address. Try different address.
179 base = NULL;
180 }
181 } else {
182 base = os::reserve_memory(size, NULL, alignment, _backing_fd);
183 }
184
185 if (base == NULL) return;
186
187 // Check alignment constraints
188 if ((((size_t)base) & (alignment - 1)) != 0) {
189 // Base not aligned, retry
190 if (_backing_fd != -1) {
191 // unmap_memory will do extra work esp. in Windows
192 if (!os::unmap_memory(base, size)) fatal("os::release_memory failed");
193 } else {
194 if (!os::release_memory(base, size)) fatal("os::release_memory failed");
195 }
196 // Make sure that size is aligned
197 size = align_size_up(size, alignment);
198 base = os::reserve_memory_aligned(size, alignment, _backing_fd);
199
200 if (requested_address != 0 &&
201 failed_to_reserve_as_requested(base, requested_address, size, false, _backing_fd != -1)) {
202 // As a result of the alignment constraints, the allocated base differs
203 // from the requested address. Return back to the caller who can
204 // take remedial action (like try again without a requested address).
205 assert(_base == NULL, "should be");
206 return;
207 }
208 }
209 }
210 // Done
211 _base = base;
212 _size = size;
213 _alignment = alignment;
214 // If heap is reserved with a backing file, the entire space has been committed. So set the _special flag to true
215 if (_backing_fd != -1) {
216 _special = true;
217 }
218 }
219
220
221 ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
222 bool special, bool executable) {
223 assert((size % os::vm_allocation_granularity()) == 0,
224 "size not allocation aligned");
225 _base = base;
226 _size = size;
227 _alignment = alignment;
228 _noaccess_prefix = 0;
229 _special = special;
230 _executable = executable;
231 }
232
233
234 ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment,
235 bool split, bool realloc) {
236 assert(partition_size <= size(), "partition failed");
237 if (split) {
// Round 'size' down to a multiple of the OS page size.
size_t ReservedSpace::page_align_size_down(size_t size) {
  return align_size_down(size, os::vm_page_size());
}
263
264
// Round 'size' up to a multiple of the OS allocation granularity
// (e.g. 64K on Windows, page size elsewhere).
size_t ReservedSpace::allocation_align_size_up(size_t size) {
  return align_size_up(size, os::vm_allocation_granularity());
}
268
269
// Round 'size' down to a multiple of the OS allocation granularity.
size_t ReservedSpace::allocation_align_size_down(size_t size) {
  return align_size_down(size, os::vm_allocation_granularity());
}
273
274
// Return the reserved range (including any noaccess prefix) to the OS and
// reset this object to the empty state.  File-backed memory (heap backing
// file, _backing_fd != -1) is always marked _special and must be unmapped
// rather than released.  No-op if nothing is reserved.
void ReservedSpace::release() {
  if (is_reserved()) {
    // The protected noaccess prefix precedes _base and must be freed with it.
    char *real_base = _base - _noaccess_prefix;
    const size_t real_size = _size + _noaccess_prefix;
    if (special()) {
      if (_backing_fd != -1) {
        // File-mapped: tear down the mapping.
        os::unmap_memory(real_base, real_size);
      }
      else {
        // Pinned large-page memory from reserve_memory_special().
        os::release_memory_special(real_base, real_size);
      }
    } else{
      os::release_memory(real_base, real_size);
    }
    _base = NULL;
    _size = 0;
    _noaccess_prefix = 0;
    _alignment = 0;
    _special = false;
    _executable = false;
  }
}
297
// Size of the protected (noaccess) prefix placed before a compressed-oops
// heap: the smallest value that is a multiple of both the page size and the
// heap alignment.
static size_t noaccess_prefix_size(size_t alignment) {
  return lcm(os::vm_page_size(), alignment);
}
301
302 void ReservedHeapSpace::establish_noaccess_prefix() {
303 assert(_alignment >= (size_t)os::vm_page_size(), "must be at least page size big");
304 _noaccess_prefix = noaccess_prefix_size(_alignment);
305
326 _size -= _noaccess_prefix;
327 assert(((uintptr_t)_base % _alignment == 0), "must be exactly of required alignment");
328 }
329
// Tries to allocate memory of size 'size' at address requested_address with alignment 'alignment'.
// Does not check whether the reserved memory actually is at requested_address, as the memory returned
// might still fulfill the wishes of the caller.
// Assures the memory is aligned to 'alignment'.
// Success is observable via _base != NULL afterwards.
// NOTE: If ReservedHeapSpace already points to some reserved memory this is freed, first.
void ReservedHeapSpace::try_reserve_heap(size_t size,
                                         size_t alignment,
                                         bool large,
                                         char* requested_address) {
  if (_base != NULL) {
    // We tried before, but we didn't like the address delivered.
    release();
  }

  // If OS doesn't support demand paging for large page memory, we need
  // to use reserve_memory_special() to reserve and pin the entire region.
  // If there is a backing file directory for this VirtualSpace then whether
  // large pages are allocated is upto the filesystem the dir resides in.
  // So we ignore the UseLargePages flag in this case.
  bool special = large && !os::can_commit_large_page_memory();
  if (special && _backing_fd != -1) {
    special = false;
    if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
                          !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
      log_debug(gc, heap, coops)("UseLargePages can't be set with HeapDir option.");
    }
  }
  char* base = NULL;

  log_trace(gc, heap, coops)("Trying to allocate at address " PTR_FORMAT
                             " heap of size " SIZE_FORMAT_HEX,
                             p2i(requested_address),
                             size);

  if (special) {
    // Heap reservation is never executable, hence 'false'.
    base = os::reserve_memory_special(size, alignment, requested_address, false);

    if (base != NULL) {
      // Check alignment constraints.
      assert((uintptr_t) base % alignment == 0,
             "Large pages returned a non-aligned address, base: "
             PTR_FORMAT " alignment: " SIZE_FORMAT_HEX,
             p2i(base), alignment);
      _special = true;
    }
  }

  if (base == NULL) {
    // Failed; try to reserve regular memory below
    if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
                          !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
      log_debug(gc, heap, coops)("Reserve regular memory without large pages");
    }

    // Optimistically assume that the OSes returns an aligned base pointer.
    // When reserving a large address range, most OSes seem to align to at
    // least 64K.

    // If the memory was requested at a particular address, use
    // os::attempt_reserve_memory_at() to avoid over mapping something
    // important. If available space is not detected, return NULL.

    if (requested_address != 0) {
      base = os::attempt_reserve_memory_at(size, requested_address, _backing_fd);
    } else {
      base = os::reserve_memory(size, NULL, alignment, _backing_fd);
    }
  }
  if (base == NULL) { return; }

  // Done
  _base = base;
  _size = size;
  _alignment = alignment;
  // If heap is reserved with a backing file, the entire space has been committed. So set the _special flag to true
  if (_backing_fd != -1) {
    _special = true;
  }

  // Check alignment constraints
  if ((((size_t)base) & (alignment - 1)) != 0) {
    // Base not aligned, retry.  release() resets _base to NULL, which the
    // caller interprets as "this attempt failed".
    release();
  }
}
415
416 void ReservedHeapSpace::try_reserve_range(char *highest_start,
417 char *lowest_start,
418 size_t attach_point_alignment,
419 char *aligned_heap_base_min_address,
420 char *upper_bound,
421 size_t size,
422 size_t alignment,
423 bool large) {
424 const size_t attach_range = highest_start - lowest_start;
425 // Cap num_attempts at possible number.
426 // At least one is possible even for 0 sized attach range.
427 const uint64_t num_attempts_possible = (attach_range / attach_point_alignment) + 1;
583 char** addresses = get_attach_addresses_for_disjoint_mode();
584 int i = 0;
585 while (addresses[i] && // End of array not yet reached.
586 ((_base == NULL) || // No previous try succeeded.
587 (_base + size > (char *)OopEncodingHeapMax && // Not zerobased or unscaled address.
588 !Universe::is_disjoint_heap_base_address((address)_base)))) { // Not disjoint address.
589 char* const attach_point = addresses[i];
590 assert(attach_point >= aligned_heap_base_min_address, "Flag support broken");
591 try_reserve_heap(size + noaccess_prefix, alignment, large, attach_point);
592 i++;
593 }
594
595 // Last, desperate try without any placement.
596 if (_base == NULL) {
597 log_trace(gc, heap, coops)("Trying to allocate at address NULL heap of size " SIZE_FORMAT_HEX, size + noaccess_prefix);
598 initialize(size + noaccess_prefix, alignment, large, NULL, false);
599 }
600 }
601 }
602
603 ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, bool large, const char* backing_fs_for_heap) : ReservedSpace() {
604
605 if (size == 0) {
606 return;
607 }
608
609 if (backing_fs_for_heap != NULL) {
610 _backing_fd = os::create_file_for_heap(backing_fs_for_heap, size);
611 if (_backing_fd == -1) {
612 vm_exit_during_initialization(
613 err_msg("Could not create file for Heap at location %s", backing_fs_for_heap));
614 }
615 }
616
617 // Heap size should be aligned to alignment, too.
618 guarantee(is_size_aligned(size, alignment), "set by caller");
619
620 if (UseCompressedOops) {
621 initialize_compressed_heap(size, alignment, large);
622 if (_size > size) {
623 // We allocated heap with noaccess prefix.
624 // It can happen we get a zerobased/unscaled heap with noaccess prefix,
625 // if we had to try at arbitrary address.
626 establish_noaccess_prefix();
627 }
628 } else {
629 initialize(size, alignment, large, NULL, false);
630 }
631
632 assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
633 "area must be distinguishable from marks for mark-sweep");
634 assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
635 "area must be distinguishable from marks for mark-sweep");
636
637 if (base() > 0) {
638 MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
639 }
640
641 if (backing_fs_for_heap != NULL) {
642 os::close(_backing_fd);
643 }
644 }
645
// Reserve space for code segment.  Same as Java heap only we mark this as
// executable (when dynamic code generation is supported).
ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
                                     size_t rs_align,
                                     bool large) :
  ReservedSpace(r_size, rs_align, large, /*executable*/ CodeCacheExtensions::support_dynamic_code()) {
  // Register the mapping with NMT as code-cache memory.
  MemTracker::record_virtual_memory_type((address)base(), mtCode);
}
654
655 // VirtualSpace
656
657 VirtualSpace::VirtualSpace() {
658 _low_boundary = NULL;
659 _high_boundary = NULL;
660 _low = NULL;
661 _high = NULL;
662 _lower_high = NULL;
|