103 "size not aligned to os::vm_allocation_granularity()");
104 assert((alignment & (granularity - 1)) == 0,
105 "alignment not aligned to os::vm_allocation_granularity()");
106 assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
107 "not a power of 2");
108
109 alignment = MAX2(alignment, (size_t)os::vm_page_size());
110
111 _base = NULL;
112 _size = 0;
113 _special = false;
114 _executable = executable;
115 _alignment = 0;
116 _noaccess_prefix = 0;
117 if (size == 0) {
118 return;
119 }
120
121 // If OS doesn't support demand paging for large page memory, we need
122 // to use reserve_memory_special() to reserve and pin the entire region.
123 bool special = large && !os::can_commit_large_page_memory();
124 char* base = NULL;
125
126 if (special) {
127
128 base = os::reserve_memory_special(size, alignment, requested_address, executable);
129
130 if (base != NULL) {
131 if (failed_to_reserve_as_requested(base, requested_address, size, true)) {
132 // OS ignored requested address. Try different address.
133 return;
134 }
135 // Check alignment constraints.
136 assert((uintptr_t) base % alignment == 0,
137 "Large pages returned a non-aligned address, base: "
138 PTR_FORMAT " alignment: " SIZE_FORMAT_HEX,
139 p2i(base), alignment);
140 _special = true;
141 } else {
142 // failed; try to reserve regular memory below
143 if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
173 // Base not aligned, retry
174 if (!os::release_memory(base, size)) fatal("os::release_memory failed");
175 // Make sure that size is aligned
176 size = align_size_up(size, alignment);
177 base = os::reserve_memory_aligned(size, alignment);
178
179 if (requested_address != 0 &&
180 failed_to_reserve_as_requested(base, requested_address, size, false)) {
181 // As a result of the alignment constraints, the allocated base differs
182 // from the requested address. Return back to the caller who can
183 // take remedial action (like try again without a requested address).
184 assert(_base == NULL, "should be");
185 return;
186 }
187 }
188 }
189 // Done
190 _base = base;
191 _size = size;
192 _alignment = alignment;
193 }
194
195
196 ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
197 bool special, bool executable) {
198 assert((size % os::vm_allocation_granularity()) == 0,
199 "size not allocation aligned");
200 _base = base;
201 _size = size;
202 _alignment = alignment;
203 _noaccess_prefix = 0;
204 _special = special;
205 _executable = executable;
206 }
207
208
209 ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment,
210 bool split, bool realloc) {
211 assert(partition_size <= size(), "partition failed");
212 if (split) {
296 _size -= _noaccess_prefix;
297 assert(((uintptr_t)_base % _alignment == 0), "must be exactly of required alignment");
298 }
299
300 // Tries to allocate memory of size 'size' at address requested_address with alignment 'alignment'.
301 // Does not check whether the reserved memory actually is at requested_address, as the memory returned
302 // might still fulfill the wishes of the caller.
303 // Assures the memory is aligned to 'alignment'.
304 // NOTE: If ReservedHeapSpace already points to some reserved memory this is freed, first.
305 void ReservedHeapSpace::try_reserve_heap(size_t size,
306 size_t alignment,
307 bool large,
308 char* requested_address) {
309 if (_base != NULL) {
310 // We tried before, but we didn't like the address delivered.
311 release();
312 }
313
314 // If OS doesn't support demand paging for large page memory, we need
315 // to use reserve_memory_special() to reserve and pin the entire region.
316 bool special = large && !os::can_commit_large_page_memory();
317 char* base = NULL;
318
319 log_trace(gc, heap, coops)("Trying to allocate at address " PTR_FORMAT
320 " heap of size " SIZE_FORMAT_HEX,
321 p2i(requested_address),
322 size);
323
324 if (special) {
325 base = os::reserve_memory_special(size, alignment, requested_address, false);
326
327 if (base != NULL) {
328 // Check alignment constraints.
329 assert((uintptr_t) base % alignment == 0,
330 "Large pages returned a non-aligned address, base: "
331 PTR_FORMAT " alignment: " SIZE_FORMAT_HEX,
332 p2i(base), alignment);
333 _special = true;
334 }
335 }
336
349 // os::attempt_reserve_memory_at() to avoid over mapping something
350 // important. If available space is not detected, return NULL.
351
352 if (requested_address != 0) {
353 base = os::attempt_reserve_memory_at(size, requested_address);
354 } else {
355 base = os::reserve_memory(size, NULL, alignment);
356 }
357 }
358 if (base == NULL) { return; }
359
360 // Done
361 _base = base;
362 _size = size;
363 _alignment = alignment;
364
365 // Check alignment constraints
366 if ((((size_t)base) & (alignment - 1)) != 0) {
367 // Base not aligned, retry.
368 release();
369 }
370 }
371
372 void ReservedHeapSpace::try_reserve_range(char *highest_start,
373 char *lowest_start,
374 size_t attach_point_alignment,
375 char *aligned_heap_base_min_address,
376 char *upper_bound,
377 size_t size,
378 size_t alignment,
379 bool large) {
380 const size_t attach_range = highest_start - lowest_start;
381 // Cap num_attempts at possible number.
382 // At least one is possible even for 0 sized attach range.
383 const uint64_t num_attempts_possible = (attach_range / attach_point_alignment) + 1;
384 const uint64_t num_attempts_to_try = MIN2((uint64_t)HeapSearchSteps, num_attempts_possible);
385
386 const size_t stepsize = (attach_range == 0) ? // Only one try.
387 (size_t) highest_start : align_size_up(attach_range / num_attempts_to_try, attach_point_alignment);
388
539 char** addresses = get_attach_addresses_for_disjoint_mode();
540 int i = 0;
541 while (addresses[i] && // End of array not yet reached.
542 ((_base == NULL) || // No previous try succeeded.
543 (_base + size > (char *)OopEncodingHeapMax && // Not zerobased or unscaled address.
544 !Universe::is_disjoint_heap_base_address((address)_base)))) { // Not disjoint address.
545 char* const attach_point = addresses[i];
546 assert(attach_point >= aligned_heap_base_min_address, "Flag support broken");
547 try_reserve_heap(size + noaccess_prefix, alignment, large, attach_point);
548 i++;
549 }
550
551 // Last, desperate try without any placement.
552 if (_base == NULL) {
553 log_trace(gc, heap, coops)("Trying to allocate at address NULL heap of size " SIZE_FORMAT_HEX, size + noaccess_prefix);
554 initialize(size + noaccess_prefix, alignment, large, NULL, false);
555 }
556 }
557 }
558
559 ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, bool large) : ReservedSpace() {
560
561 if (size == 0) {
562 return;
563 }
564
565 // Heap size should be aligned to alignment, too.
566 guarantee(is_size_aligned(size, alignment), "set by caller");
567
568 if (UseCompressedOops) {
569 initialize_compressed_heap(size, alignment, large);
570 if (_size > size) {
571 // We allocated heap with noaccess prefix.
572 // It can happen we get a zerobased/unscaled heap with noaccess prefix,
573 // if we had to try at arbitrary address.
574 establish_noaccess_prefix();
575 }
576 } else {
577 initialize(size, alignment, large, NULL, false);
578 }
579
580 assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
581 "area must be distinguishable from marks for mark-sweep");
582 assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
583 "area must be distinguishable from marks for mark-sweep");
584
|
103 "size not aligned to os::vm_allocation_granularity()");
104 assert((alignment & (granularity - 1)) == 0,
105 "alignment not aligned to os::vm_allocation_granularity()");
106 assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
107 "not a power of 2");
108
109 alignment = MAX2(alignment, (size_t)os::vm_page_size());
110
111 _base = NULL;
112 _size = 0;
113 _special = false;
114 _executable = executable;
115 _alignment = 0;
116 _noaccess_prefix = 0;
117 if (size == 0) {
118 return;
119 }
120
121 // If OS doesn't support demand paging for large page memory, we need
122 // to use reserve_memory_special() to reserve and pin the entire region.
  // If there is a backing file directory for this VirtualSpace, then whether
  // large pages are allocated is up to the filesystem the directory resides in,
  // so we ignore the UseLargePages flag in this case.
  bool special = (_backingFileDir == NULL) && (large && !os::can_commit_large_page_memory());
  char* base = NULL;

  if (special) {

    base = os::reserve_memory_special(size, alignment, requested_address, executable);

    if (base != NULL) {
      if (failed_to_reserve_as_requested(base, requested_address, size, true)) {
        // OS ignored requested address. Try different address.
        return;
      }
      // Check alignment constraints.
      assert((uintptr_t) base % alignment == 0,
             "Large pages returned a non-aligned address, base: "
             PTR_FORMAT " alignment: " SIZE_FORMAT_HEX,
             p2i(base), alignment);
      _special = true;
    } else {
      // failed; try to reserve regular memory below
      if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||

      // ... (code elided) ...

      // Base not aligned, retry
      if (!os::release_memory(base, size)) fatal("os::release_memory failed");
      // Make sure that size is aligned
      size = align_size_up(size, alignment);
      base = os::reserve_memory_aligned(size, alignment);

      if (requested_address != 0 &&
          failed_to_reserve_as_requested(base, requested_address, size, false)) {
        // As a result of the alignment constraints, the allocated base differs
        // from the requested address. Return back to the caller who can
        // take remedial action (like try again without a requested address).
        assert(_base == NULL, "should be");
        return;
      }
    }
  }
  // Done
  _base = base;
  _size = size;
  _alignment = alignment;

  if (_backingFileDir != NULL) {
    // At this point a virtual address range is reserved; now map this memory to a file.
    os::map_memory_to_file(base, size, _backingFileDir);
    // Mark this virtual space as _special because the physical memory is committed.
    _special = true;
  }
}
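
// The file-backed path above delegates to os::map_memory_to_file(). A minimal
// POSIX sketch of what such a step could look like is given below for
// illustration only; the helper name, flags, and error handling are
// assumptions, not the actual HotSpot implementation.

#include <fcntl.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

static char* sketch_map_memory_to_file(char* base, size_t size, const char* dir) {
  // Create an unlinked temporary file inside the requested directory so the
  // heap pages are backed by that filesystem (e.g. a DAX mount).
  char path[PATH_MAX];
  snprintf(path, sizeof(path), "%s/jvmheap.XXXXXX", dir);
  int fd = mkstemp(path);
  if (fd == -1) return NULL;
  unlink(path);                             // Name no longer needed on disk.
  if (ftruncate(fd, (off_t)size) == -1) {   // Size the backing file.
    close(fd);
    return NULL;
  }
  // Replace the reserved anonymous range with a shared file mapping.
  char* addr = (char*)mmap(base, size, PROT_READ | PROT_WRITE,
                           MAP_SHARED | MAP_FIXED, fd, 0);
  close(fd);                                // The mapping holds its own reference.
  return (addr == MAP_FAILED) ? NULL : addr;
}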


ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
                             bool special, bool executable) {
  assert((size % os::vm_allocation_granularity()) == 0,
         "size not allocation aligned");
  _base = base;
  _size = size;
  _alignment = alignment;
  _noaccess_prefix = 0;
  _special = special;
  _executable = executable;
}
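
// first_part() below carves a prefix off an existing reservation; a matching
// last_part() is assumed to return the remainder, so a single mapping can back
// two adjacent spaces. Hypothetical usage (names and sizes illustrative):

static void split_reservation_sketch(ReservedSpace whole, size_t prefix_size, size_t alignment) {
  ReservedSpace prefix = whole.first_part(prefix_size, alignment, /* split */ true, /* realloc */ false);
  ReservedSpace rest   = whole.last_part(prefix_size, alignment);
  assert(prefix.size() + rest.size() == whole.size(), "partitions must cover the reservation");
}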


ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment,
                                        bool split, bool realloc) {
  assert(partition_size <= size(), "partition failed");
  if (split) {

  // ... (code elided; below is the tail of ReservedHeapSpace::establish_noaccess_prefix()) ...

  _size -= _noaccess_prefix;
  assert(((uintptr_t)_base % _alignment == 0), "must be exactly of required alignment");
}

// Tries to allocate memory of size 'size' at address requested_address with alignment 'alignment'.
// Does not check whether the reserved memory actually is at requested_address, as the memory returned
// might still fulfill the wishes of the caller.
// Assures the memory is aligned to 'alignment'.
// NOTE: If ReservedHeapSpace already points to some reserved memory this is freed, first.
void ReservedHeapSpace::try_reserve_heap(size_t size,
                                         size_t alignment,
                                         bool large,
                                         char* requested_address) {
  if (_base != NULL) {
    // We tried before, but we didn't like the address delivered.
    release();
  }

  // If OS doesn't support demand paging for large page memory, we need
  // to use reserve_memory_special() to reserve and pin the entire region.
  // If there is a backing file directory for this VirtualSpace, then whether
  // large pages are allocated is up to the filesystem the directory resides in,
  // so we ignore the UseLargePages flag in this case.
  bool special = (_backingFileDir == NULL) && (large && !os::can_commit_large_page_memory());
  char* base = NULL;

  log_trace(gc, heap, coops)("Trying to allocate at address " PTR_FORMAT
                             " heap of size " SIZE_FORMAT_HEX,
                             p2i(requested_address),
                             size);

  if (special) {
    base = os::reserve_memory_special(size, alignment, requested_address, false);

    if (base != NULL) {
      // Check alignment constraints.
      assert((uintptr_t) base % alignment == 0,
             "Large pages returned a non-aligned address, base: "
             PTR_FORMAT " alignment: " SIZE_FORMAT_HEX,
             p2i(base), alignment);
      _special = true;
    }
  }

  // ... (code elided) ...

    // os::attempt_reserve_memory_at() to avoid over mapping something
    // important.  If available space is not detected, return NULL.

    if (requested_address != 0) {
      base = os::attempt_reserve_memory_at(size, requested_address);
    } else {
      base = os::reserve_memory(size, NULL, alignment);
    }
  }
  if (base == NULL) { return; }

  // Done
  _base = base;
  _size = size;
  _alignment = alignment;

  // Check alignment constraints
  if ((((size_t)base) & (alignment - 1)) != 0) {
    // Base not aligned, retry.
    release();
    return;
  }
  if (_backingFileDir != NULL) {
    // At this point a virtual address range is reserved; now map this memory to a file.
    os::map_memory_to_file(base, size, _backingFileDir);
    // Mark this virtual space as _special because the physical memory is committed.
    _special = true;
  }
}
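
// try_reserve_heap() reports failure by leaving _base == NULL, which lets a
// caller probe a series of candidate attach points. A simplified driver loop
// in the spirit of try_reserve_range() below (illustrative only; access
// control and the wrap-around checks of the real code are ignored):

static void probe_attach_points_sketch(ReservedHeapSpace* rs,
                                       char* highest_start, char* lowest_start,
                                       size_t stepsize, size_t size,
                                       size_t alignment, bool large) {
  // Walk from the highest candidate downwards until a reservation sticks.
  for (char* attach_point = highest_start;
       attach_point >= lowest_start && rs->base() == NULL;
       attach_point -= stepsize) {
    rs->try_reserve_heap(size, alignment, large, attach_point);
  }
}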

void ReservedHeapSpace::try_reserve_range(char *highest_start,
                                          char *lowest_start,
                                          size_t attach_point_alignment,
                                          char *aligned_heap_base_min_address,
                                          char *upper_bound,
                                          size_t size,
                                          size_t alignment,
                                          bool large) {
  const size_t attach_range = highest_start - lowest_start;
  // Cap num_attempts at possible number.
  // At least one is possible even for 0 sized attach range.
  const uint64_t num_attempts_possible = (attach_range / attach_point_alignment) + 1;
  const uint64_t num_attempts_to_try = MIN2((uint64_t)HeapSearchSteps, num_attempts_possible);

  const size_t stepsize = (attach_range == 0) ? // Only one try.
    (size_t) highest_start : align_size_up(attach_range / num_attempts_to_try, attach_point_alignment);

  // ... (code elided; the loop below is the tail of
  // ReservedHeapSpace::initialize_compressed_heap()) ...

    char** addresses = get_attach_addresses_for_disjoint_mode();
    int i = 0;
    while (addresses[i] &&                                  // End of array not yet reached.
           ((_base == NULL) ||                              // No previous try succeeded.
            (_base + size > (char *)OopEncodingHeapMax &&   // Not zerobased or unscaled address.
             !Universe::is_disjoint_heap_base_address((address)_base)))) {  // Not disjoint address.
      char* const attach_point = addresses[i];
      assert(attach_point >= aligned_heap_base_min_address, "Flag support broken");
      try_reserve_heap(size + noaccess_prefix, alignment, large, attach_point);
      i++;
    }

    // Last, desperate try without any placement.
    if (_base == NULL) {
      log_trace(gc, heap, coops)("Trying to allocate at address NULL heap of size " SIZE_FORMAT_HEX, size + noaccess_prefix);
      initialize(size + noaccess_prefix, alignment, large, NULL, false);
    }
  }
}
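
// get_attach_addresses_for_disjoint_mode() is assumed to return a
// NULL-terminated array of candidate heap bases, which is why the loop above
// may test addresses[i] directly. Illustrative shape only; the values are
// invented, not the real table:

static char* disjoint_attach_addresses_sketch[] = {
  (char*)(32 * G),
  (char*)(64 * G),
  (char*)(128 * G),
  NULL  // Terminator checked by the while loop.
};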

ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, bool large, const char* backingFSforHeap) : ReservedSpace() {

  if (size == 0) {
    return;
  }

  _backingFileDir = backingFSforHeap;
  // Heap size should be aligned to alignment, too.
  guarantee(is_size_aligned(size, alignment), "set by caller");

  if (UseCompressedOops) {
    initialize_compressed_heap(size, alignment, large);
    if (_size > size) {
      // We allocated heap with noaccess prefix.
      // It can happen we get a zerobased/unscaled heap with noaccess prefix,
      // if we had to try at arbitrary address.
      establish_noaccess_prefix();
    }
  } else {
    initialize(size, alignment, large, NULL, false);
  }

  assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
         "area must be distinguishable from marks for mark-sweep");
  assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
         "area must be distinguishable from marks for mark-sweep");

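// Minimal usage sketch of the new backing-file parameter. The directory path
// and the surrounding code are hypothetical; in the VM the value would come
// from a command-line option naming a mount point of the desired filesystem:

static ReservedHeapSpace reserve_heap_on_device_sketch(size_t heap_size,
                                                       size_t heap_alignment,
                                                       bool use_large_pages) {
  const char* heap_dir = "/mnt/pmem0";  // Hypothetical DAX mount point.
  ReservedHeapSpace heap_rs(heap_size, heap_alignment, use_large_pages, heap_dir);
  guarantee(heap_rs.base() != NULL, "could not reserve heap on backing filesystem");
  return heap_rs;
}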