30 #include "oops/oop.inline.hpp"
31 #include "services/memTracker.hpp"
32
33 // ReservedSpace
34
35 // Dummy constructor
36 ReservedSpace::ReservedSpace() : _base(NULL), _size(0), _noaccess_prefix(0),
37 _alignment(0), _special(false), _executable(false) {
38 }
39
40 ReservedSpace::ReservedSpace(size_t size, size_t preferred_page_size) {
41 bool has_preferred_page_size = preferred_page_size != 0;
42 // Want to use large pages where possible and pad with small pages.
43 size_t page_size = has_preferred_page_size ? preferred_page_size : os::page_size_for_region_unaligned(size, 1);
44 bool large_pages = page_size != (size_t)os::vm_page_size();
45 size_t alignment;
46 if (large_pages && has_preferred_page_size) {
47 alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
48 // ReservedSpace initialization requires size to be aligned to the given
49 // alignment. Align the size up.
50 size = align_size_up(size, alignment);
51 } else {
52 // Don't force the alignment to be large page aligned,
53 // since that will waste memory.
54 alignment = os::vm_allocation_granularity();
55 }
56 initialize(size, alignment, large_pages, NULL, false);
57 }
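// Example (illustrative, assuming a 4K base page size and a 2M preferred page
// size): ReservedSpace(10*M + 4*K, 2*M) sees large_pages == true, picks
// alignment = MAX2(2*M, os::vm_allocation_granularity()) == 2*M, and rounds the
// requested size up to 12*M before calling initialize(). With
// preferred_page_size == 0 the size is left unchanged and only allocation
// granularity alignment is used.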
58
59 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
60 bool large,
61 char* requested_address) {
62 initialize(size, alignment, large, requested_address, false);
63 }
64
65 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
66 bool large,
67 bool executable) {
68 initialize(size, alignment, large, NULL, executable);
69 }
70
155 // os::attempt_reserve_memory_at() to avoid over-mapping something
156 // important. If available space is not detected, return NULL.
157
158 if (requested_address != 0) {
159 base = os::attempt_reserve_memory_at(size, requested_address);
160 if (failed_to_reserve_as_requested(base, requested_address, size, false)) {
161 // OS ignored the requested address. Try a different address.
162 base = NULL;
163 }
164 } else {
165 base = os::reserve_memory(size, NULL, alignment);
166 }
167
168 if (base == NULL) return;
169
170 // Check alignment constraints
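// For a power-of-two alignment the bitmask test below is equivalent to
// !is_ptr_aligned(base, alignment); e.g. base == 0x7f0000201000 with
// alignment == 2*M leaves the low bits 0x1000 set, so the mapping is released
// and re-reserved via os::reserve_memory_aligned() (illustrative values).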
171 if ((((size_t)base) & (alignment - 1)) != 0) {
172 // Base not aligned, retry
173 if (!os::release_memory(base, size)) fatal("os::release_memory failed");
174 // Make sure that size is aligned
175 size = align_size_up(size, alignment);
176 base = os::reserve_memory_aligned(size, alignment);
177
178 if (requested_address != 0 &&
179 failed_to_reserve_as_requested(base, requested_address, size, false)) {
180 // As a result of the alignment constraints, the allocated base differs
181 // from the requested address. Return to the caller, who can
182 // take remedial action (like try again without a requested address).
183 assert(_base == NULL, "should be");
184 return;
185 }
186 }
187 }
188 // Done
189 _base = base;
190 _size = size;
191 _alignment = alignment;
192 }
193
194
195 ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
209 bool split, bool realloc) {
210 assert(partition_size <= size(), "partition failed");
211 if (split) {
212 os::split_reserved_memory(base(), size(), partition_size, realloc);
213 }
214 ReservedSpace result(base(), partition_size, alignment, special(),
215 executable());
216 return result;
217 }
218
219
220 ReservedSpace
221 ReservedSpace::last_part(size_t partition_size, size_t alignment) {
222 assert(partition_size <= size(), "partition failed");
223 ReservedSpace result(base() + partition_size, size() - partition_size,
224 alignment, special(), executable());
225 return result;
226 }
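// Usage sketch (illustrative; assumes the partially elided function whose tail
// appears above is ReservedSpace::first_part(), the counterpart of last_part()):
//
//   ReservedSpace rs(2 * M, os::vm_allocation_granularity(), false, (char*)NULL);
//   ReservedSpace head = rs.first_part(1 * M, rs.alignment(), false, false);
//   ReservedSpace tail = rs.last_part(1 * M, rs.alignment());
//
// head then covers [base, base + 1M) and tail covers [base + 1M, base + 2M).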
227
228
229 size_t ReservedSpace::page_align_size_up(size_t size) {
230 return align_size_up(size, os::vm_page_size());
231 }
232
233
234 size_t ReservedSpace::page_align_size_down(size_t size) {
235 return align_size_down(size, os::vm_page_size());
236 }
237
238
239 size_t ReservedSpace::allocation_align_size_up(size_t size) {
240 return align_size_up(size, os::vm_allocation_granularity());
241 }
242
243
244 size_t ReservedSpace::allocation_align_size_down(size_t size) {
245 return align_size_down(size, os::vm_allocation_granularity());
246 }
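// Example values for the helpers above (illustrative): with a 4K page size,
//   page_align_size_up(10*K)   == 12*K and page_align_size_down(10*K) == 8*K;
// with a 64K allocation granularity (e.g. on Windows),
//   allocation_align_size_up(10*K) == 64*K and allocation_align_size_down(10*K) == 0.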
247
248
249 void ReservedSpace::release() {
250 if (is_reserved()) {
251 char *real_base = _base - _noaccess_prefix;
252 const size_t real_size = _size + _noaccess_prefix;
253 if (special()) {
254 os::release_memory_special(real_base, real_size);
255 } else {
256 os::release_memory(real_base, real_size);
257 }
258 _base = NULL;
259 _size = 0;
260 _noaccess_prefix = 0;
261 _alignment = 0;
262 _special = false;
263 _executable = false;
264 }
265 }
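// Memory layout handled by release() when a noaccess prefix is present (sketch):
//
//   real_base            _base                       _base + _size
//   |---- noaccess ------|----------- heap ----------|
//
// The whole original mapping [real_base, real_base + real_size) is handed back
// to the OS, not just the usable heap part.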
366 // Base not aligned, retry.
367 release();
368 }
369 }
370
371 void ReservedHeapSpace::try_reserve_range(char *highest_start,
372 char *lowest_start,
373 size_t attach_point_alignment,
374 char *aligned_heap_base_min_address,
375 char *upper_bound,
376 size_t size,
377 size_t alignment,
378 bool large) {
379 const size_t attach_range = highest_start - lowest_start;
380 // Cap num_attempts at the number of attempts that is actually possible.
381 // At least one attempt is possible even for a zero-sized attach range.
382 const uint64_t num_attempts_possible = (attach_range / attach_point_alignment) + 1;
383 const uint64_t num_attempts_to_try = MIN2((uint64_t)HeapSearchSteps, num_attempts_possible);
384
385 const size_t stepsize = (attach_range == 0) ? // Only one try.
386 (size_t) highest_start : align_size_up(attach_range / num_attempts_to_try, attach_point_alignment);
387
388 // Try attach points from top to bottom.
389 char* attach_point = highest_start;
390 while (attach_point >= lowest_start &&
391 attach_point <= highest_start && // Avoid wrap around.
392 ((_base == NULL) ||
393 (_base < aligned_heap_base_min_address || _base + size > upper_bound))) {
394 try_reserve_heap(size, alignment, large, attach_point);
395 attach_point -= stepsize;
396 }
397 }
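// Worked example (illustrative): with attach_range == 3*G,
// attach_point_alignment == 4*K and HeapSearchSteps == 3,
//   num_attempts_possible == 3*G / 4*K + 1,
//   num_attempts_to_try   == 3,
//   stepsize              == align_size_up(1*G, 4*K) == 1*G,
// so attach points at lowest_start + 3*G, + 2*G, + 1*G and + 0 are probed from
// top to bottom, stopping early once a suitable _base has been reserved.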
398
399 #define SIZE_64K ((uint64_t) UCONST64( 0x10000))
400 #define SIZE_256M ((uint64_t) UCONST64( 0x10000000))
401 #define SIZE_32G ((uint64_t) UCONST64( 0x800000000))
402
403 // Helper for heap allocation. Returns an array with addresses
404 // (OS-specific) which are suited for disjoint base mode. The array is
405 // NULL-terminated.
406 static char** get_attach_addresses_for_disjoint_mode() {
446 guarantee(size + noaccess_prefix_size(alignment) <= OopEncodingHeapMax,
447 "can not allocate compressed oop heap for this size");
448 guarantee(alignment == MAX2(alignment, (size_t)os::vm_page_size()), "alignment too small");
449 assert(HeapBaseMinAddress > 0, "sanity");
450
451 const size_t granularity = os::vm_allocation_granularity();
452 assert((size & (granularity - 1)) == 0,
453 "size not aligned to os::vm_allocation_granularity()");
454 assert((alignment & (granularity - 1)) == 0,
455 "alignment not aligned to os::vm_allocation_granularity()");
456 assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
457 "not a power of 2");
458
459 // The necessary attach point alignment for generated wish addresses.
460 // This is needed to increase the chance of attaching for mmap and shmat.
461 const size_t os_attach_point_alignment =
462 AIX_ONLY(SIZE_256M) // Known shm boundary alignment.
463 NOT_AIX(os::vm_allocation_granularity());
464 const size_t attach_point_alignment = lcm(alignment, os_attach_point_alignment);
465
466 char *aligned_heap_base_min_address = (char *)align_ptr_up((void *)HeapBaseMinAddress, alignment);
467 size_t noaccess_prefix = ((aligned_heap_base_min_address + size) > (char*)OopEncodingHeapMax) ?
468 noaccess_prefix_size(alignment) : 0;
469
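// Illustrative numbers (default 8-byte object alignment): UnscaledOopHeapMax is
// 4*G and OopEncodingHeapMax is 32*G, so e.g. a 31*G heap with
// aligned_heap_base_min_address == 2*G would end above 32*G and therefore has to
// be reserved together with a noaccess prefix up front.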
470 // Attempt to alloc at user-given address.
471 if (!FLAG_IS_DEFAULT(HeapBaseMinAddress)) {
472 try_reserve_heap(size + noaccess_prefix, alignment, large, aligned_heap_base_min_address);
473 if (_base != aligned_heap_base_min_address) { // Enforce this exact address.
474 release();
475 }
476 }
477
478 // Keep heap at HeapBaseMinAddress.
479 if (_base == NULL) {
480
481 // Try to allocate the heap at addresses that allow efficient oop compression.
482 // Different schemes are tried, in order of decreasing optimization potential.
483 //
484 // For this, try_reserve_heap() is called with the desired heap base addresses.
485 // A call into the os layer to allocate at a given address can return memory
486 // at a different address than requested. Still, this might be memory at a useful
487 // address. try_reserve_heap() therefore always keeps whatever memory it got,
488 // as the criteria for a good heap are only checked here, by the calling code.
489
490 // Attempt to allocate so that we can run without base and scale (32-bit unscaled compressed oops).
491 // Give it several tries from top of range to bottom.
492 if (aligned_heap_base_min_address + size <= (char *)UnscaledOopHeapMax) {
493
494 // Calc the address range within which we try to attach (range of possible start addresses).
495 char* const highest_start = align_ptr_down((char *)UnscaledOopHeapMax - size, attach_point_alignment);
496 char* const lowest_start = align_ptr_up(aligned_heap_base_min_address, attach_point_alignment);
497 try_reserve_range(highest_start, lowest_start, attach_point_alignment,
498 aligned_heap_base_min_address, (char *)UnscaledOopHeapMax, size, alignment, large);
499 }
500
501 // zerobased: Attempt to allocate in the lower 32G.
502 // But leave room for the compressed class space, which is allocated above
503 // the heap.
504 char *zerobased_max = (char *)OopEncodingHeapMax;
505 const size_t class_space = align_size_up(CompressedClassSpaceSize, alignment);
506 // For small heaps, save some space for compressed class pointer
507 // space so it can be decoded with no base.
508 if (UseCompressedClassPointers && !UseSharedSpaces &&
509 OopEncodingHeapMax <= KlassEncodingMetaspaceMax &&
510 (uint64_t)(aligned_heap_base_min_address + size + class_space) <= KlassEncodingMetaspaceMax) {
511 zerobased_max = (char *)OopEncodingHeapMax - class_space;
512 }
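// With a zerobased heap (everything below OopEncodingHeapMax) oops are decoded
// with a shift only, i.e. narrow_oop << LogMinObjAlignmentInBytes, without adding
// a heap base. Carving the class space out of zerobased_max above keeps the same
// property available for compressed class pointers.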
513
514 // Give it several tries from top of range to bottom.
515 if (aligned_heap_base_min_address + size <= zerobased_max && // Zerobased theoretically possible.
516 ((_base == NULL) || // No previous try succeeded.
517 (_base + size > zerobased_max))) { // Unscaled delivered an arbitrary address.
518
519 // Calc the address range within which we try to attach (range of possible start addresses).
520 char *const highest_start = align_ptr_down(zerobased_max - size, attach_point_alignment);
521 // Need to be careful: size is not guaranteed to be less than UnscaledOopHeapMax
522 // due to type constraints, so the subtraction below may wrap around.
523 char *lowest_start = aligned_heap_base_min_address;
524 uint64_t unscaled_end = UnscaledOopHeapMax - size;
525 if (unscaled_end < UnscaledOopHeapMax) { // unscaled_end wrapped if size is large
526 lowest_start = MAX2(lowest_start, (char*)unscaled_end);
527 }
528 lowest_start = align_ptr_up(lowest_start, attach_point_alignment);
529 try_reserve_range(highest_start, lowest_start, attach_point_alignment,
530 aligned_heap_base_min_address, zerobased_max, size, alignment, large);
531 }
532
533 // Now we go for heaps with base != 0. We need a noaccess prefix to efficiently
534 // implement null checks.
535 noaccess_prefix = noaccess_prefix_size(alignment);
536
537 // Try to attach at addresses that are aligned to OopEncodingHeapMax (disjoint-base mode).
538 char** addresses = get_attach_addresses_for_disjoint_mode();
539 int i = 0;
540 while (addresses[i] && // End of array not yet reached.
541 ((_base == NULL) || // No previous try succeeded.
542 (_base + size > (char *)OopEncodingHeapMax && // Not zerobased or unscaled address.
543 !Universe::is_disjoint_heap_base_address((address)_base)))) { // Not disjoint address.
544 char* const attach_point = addresses[i];
545 assert(attach_point >= aligned_heap_base_min_address, "Flag support broken");
546 try_reserve_heap(size + noaccess_prefix, alignment, large, attach_point);
547 i++;
548 }
549
550 // Last, desperate try without any placement.
551 if (_base == NULL) {
552 log_trace(gc, heap, coops)("Trying to allocate heap of size " SIZE_FORMAT_HEX " at address NULL", size + noaccess_prefix);
553 initialize(size + noaccess_prefix, alignment, large, NULL, false);
554 }
555 }
556 }
557
558 ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, bool large) : ReservedSpace() {
559
560 if (size == 0) {
561 return;
562 }
563
564 // Heap size should be aligned to alignment, too.
565 guarantee(is_size_aligned(size, alignment), "set by caller");
566
567 if (UseCompressedOops) {
568 initialize_compressed_heap(size, alignment, large);
569 if (_size > size) {
570 // We allocated the heap with a noaccess prefix.
571 // We can still end up with a zerobased/unscaled heap with a noaccess prefix
572 // if we had to try at an arbitrary address.
573 establish_noaccess_prefix();
574 }
575 } else {
576 initialize(size, alignment, large, NULL, false);
577 }
578
579 assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
580 "area must be distinguishable from marks for mark-sweep");
581 assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
582 "area must be distinguishable from marks for mark-sweep");
583
584 if (base() > 0) {
585 MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
734 }
735 if (committed_middle < middle) {
736 assert(committed_high == 0, "Must be");
737 }
738
739 if (committed_low < lower) {
740 assert(committed_high == 0, "Must be");
741 assert(committed_middle == 0, "Must be");
742 }
743 #endif
744
745 return committed_low + committed_middle + committed_high;
746 }
747
748
749 bool VirtualSpace::contains(const void* p) const {
750 return low() <= (const char*) p && (const char*) p < high();
751 }
752
753 static void pretouch_expanded_memory(void* start, void* end) {
754 assert(is_ptr_aligned(start, os::vm_page_size()), "Unexpected alignment");
755 assert(is_ptr_aligned(end, os::vm_page_size()), "Unexpected alignment");
756
757 os::pretouch_memory(start, end);
758 }
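// Pretouching writes to every page in [start, end) so the OS backs the range
// with physical memory up front rather than at first access; e.g. expanding by
// 1*G with 4*K pages touches 262144 pages.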
759
760 static bool commit_expanded(char* start, size_t size, size_t alignment, bool pre_touch, bool executable) {
761 if (os::commit_memory(start, size, alignment, executable)) {
762 if (pre_touch || AlwaysPreTouch) {
763 pretouch_expanded_memory(start, start + size);
764 }
765 return true;
766 }
767
768 debug_only(warning(
769 "INFO: os::commit_memory(" PTR_FORMAT ", " PTR_FORMAT
770 " size=" SIZE_FORMAT ", executable=%d) failed",
771 p2i(start), p2i(start + size), size, executable);)
772
773 return false;
774 }
775
1020 static void small_page_write(void* addr, size_t size) {
1021 size_t page_size = os::vm_page_size();
1022
1023 char* end = (char*)addr + size;
1024 for (char* p = (char*)addr; p < end; p += page_size) {
1025 *p = 1;
1026 }
1027 }
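// Writing one byte per small page verifies that a special (large-page, committed
// up front) reservation is really backed and writable; e.g. a 2*M reservation
// with 4*K pages results in 512 writes.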
1028
1029 static void release_memory_for_test(ReservedSpace rs) {
1030 if (rs.special()) {
1031 guarantee(os::release_memory_special(rs.base(), rs.size()), "Shouldn't fail");
1032 } else {
1033 guarantee(os::release_memory(rs.base(), rs.size()), "Shouldn't fail");
1034 }
1035 }
1036
1037 static void test_reserved_space1(size_t size, size_t alignment) {
1038 test_log("test_reserved_space1(%p)", (void*) (uintptr_t) size);
1039
1040 assert(is_size_aligned(size, alignment), "Incorrect input parameters");
1041
1042 ReservedSpace rs(size, // size
1043 alignment, // alignment
1044 UseLargePages, // large
1045 (char *)NULL); // requested_address
1046
1047 test_log(" rs.special() == %d", rs.special());
1048
1049 assert(rs.base() != NULL, "Must be");
1050 assert(rs.size() == size, "Must be");
1051
1052 assert(is_ptr_aligned(rs.base(), alignment), "aligned sizes should always give aligned addresses");
1053 assert(is_size_aligned(rs.size(), alignment), "aligned sizes should always give aligned addresses");
1054
1055 if (rs.special()) {
1056 small_page_write(rs.base(), size);
1057 }
1058
1059 release_memory_for_test(rs);
1060 }
1061
1062 static void test_reserved_space2(size_t size) {
1063 test_log("test_reserved_space2(%p)", (void*)(uintptr_t)size);
1064
1065 assert(is_size_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
1066
1067 ReservedSpace rs(size);
1068
1069 test_log(" rs.special() == %d", rs.special());
1070
1071 assert(rs.base() != NULL, "Must be");
1072 assert(rs.size() == size, "Must be");
1073
1074 if (rs.special()) {
1075 small_page_write(rs.base(), size);
1076 }
1077
1078 release_memory_for_test(rs);
1079 }
1080
1081 static void test_reserved_space3(size_t size, size_t alignment, bool maybe_large) {
1082 test_log("test_reserved_space3(%p, %p, %d)",
1083 (void*)(uintptr_t)size, (void*)(uintptr_t)alignment, maybe_large);
1084
1085 if (size < alignment) {
1086 // Tests might set -XX:LargePageSizeInBytes=<small pages> and cause unexpected input arguments for this test.
1087 assert((size_t)os::vm_page_size() == os::large_page_size(), "Test needs further refinement");
1088 return;
1089 }
1090
1091 assert(is_size_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
1092 assert(is_size_aligned(size, alignment), "Size must be aligned to the given alignment");
1093
1094 bool large = maybe_large && UseLargePages && size >= os::large_page_size();
1095
1096 ReservedSpace rs(size, alignment, large, false);
1097
1098 test_log(" rs.special() == %d", rs.special());
1099
1100 assert(rs.base() != NULL, "Must be");
1101 assert(rs.size() == size, "Must be");
1102
1103 if (rs.special()) {
1104 small_page_write(rs.base(), size);
1105 }
1106
1107 release_memory_for_test(rs);
1108 }
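// The three variants above exercise different ReservedSpace constructors
// (illustrative summary): test_reserved_space1 passes an explicit alignment plus
// UseLargePages, test_reserved_space2 calls ReservedSpace(size) with no explicit
// alignment, and test_reserved_space3 only requests large pages when maybe_large,
// UseLargePages and the size all allow it.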
1109
1110
1111 static void test_reserved_space1() {
1112 size_t size = 2 * 1024 * 1024;
1227 }
1228 }
1229
1230 static bool initialize_virtual_space(VirtualSpace& vs, ReservedSpace rs, TestLargePages mode) {
1231 switch(mode) {
1232 default:
1233 case Default:
1234 case Reserve:
1235 return vs.initialize(rs, 0);
1236 case Disable:
1237 return vs.initialize_with_granularity(rs, 0, os::vm_page_size());
1238 case Commit:
1239 return vs.initialize_with_granularity(rs, 0, os::page_size_for_region_unaligned(rs.size(), 1));
1240 }
1241 }
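// Mode summary (illustrative): Default and Reserve take whatever page size
// vs.initialize() picks, Disable forces small pages by passing os::vm_page_size()
// as the commit granularity, and Commit derives the granularity from the region
// size via os::page_size_for_region_unaligned(), so large pages may be used for
// committing when the reservation is big enough.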
1242
1243 public:
1244 static void test_virtual_space_actual_committed_space(size_t reserve_size, size_t commit_size,
1245 TestLargePages mode = Default) {
1246 size_t granularity = os::vm_allocation_granularity();
1247 size_t reserve_size_aligned = align_size_up(reserve_size, granularity);
1248
1249 ReservedSpace reserved = reserve_memory(reserve_size_aligned, mode);
1250
1251 assert(reserved.is_reserved(), "Must be");
1252
1253 VirtualSpace vs;
1254 bool initialized = initialize_virtual_space(vs, reserved, mode);
1255 assert(initialized, "Failed to initialize VirtualSpace");
1256
1257 vs.expand_by(commit_size, false);
1258
1259 if (vs.special()) {
1260 assert_equals(vs.actual_committed_size(), reserve_size_aligned);
1261 } else {
1262 assert_ge(vs.actual_committed_size(), commit_size);
1263 // Approximate the commit granularity.
1264 // Make sure that we don't commit using large pages
1265 // if large pages have been disabled for this VirtualSpace.
1266 size_t commit_granularity = (mode == Disable || !UseLargePages) ?
1267 os::vm_page_size() : os::large_page_size();