104 "size not aligned to os::vm_allocation_granularity()");
105 assert((alignment & (granularity - 1)) == 0,
106 "alignment not aligned to os::vm_allocation_granularity()");
107 assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
108 "not a power of 2");
109
110 alignment = MAX2(alignment, (size_t)os::vm_page_size());
111
112 _base = NULL;
113 _size = 0;
114 _special = false;
115 _executable = executable;
116 _alignment = 0;
117 _noaccess_prefix = 0;
118 if (size == 0) {
119 return;
120 }
121
122 // If OS doesn't support demand paging for large page memory, we need
123 // to use reserve_memory_special() to reserve and pin the entire region.
124 bool special = large && !os::can_commit_large_page_memory();
125 char* base = NULL;
126
127 if (special) {
128
129 base = os::reserve_memory_special(size, alignment, requested_address, executable);
130
131 if (base != NULL) {
132 if (failed_to_reserve_as_requested(base, requested_address, size, true)) {
133 // OS ignored requested address. Try different address.
134 return;
135 }
136 // Check alignment constraints.
137 assert((uintptr_t) base % alignment == 0,
138 "Large pages returned a non-aligned address, base: "
139 PTR_FORMAT " alignment: " SIZE_FORMAT_HEX,
140 p2i(base), alignment);
141 _special = true;
142 } else {
143 // failed; try to reserve regular memory below
144 if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
145 !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
146 if (PrintCompressedOopsMode) {
147 tty->cr();
148 tty->print_cr("Reserve regular memory without large pages.");
149 }
150 }
151 }
152 }
153
154 if (base == NULL) {
155     // Optimistically assume that the OS returns an aligned base pointer.
156 // When reserving a large address range, most OSes seem to align to at
157 // least 64K.
158
159 // If the memory was requested at a particular address, use
160 // os::attempt_reserve_memory_at() to avoid over mapping something
161 // important. If available space is not detected, return NULL.
162
163 if (requested_address != 0) {
164 base = os::attempt_reserve_memory_at(size, requested_address);
165 if (failed_to_reserve_as_requested(base, requested_address, size, false)) {
166 // OS ignored requested address. Try different address.
167 base = NULL;
168 }
169 } else {
170 base = os::reserve_memory(size, NULL, alignment);
171 }
172
173 if (base == NULL) return;
174
175 // Check alignment constraints
176 if ((((size_t)base) & (alignment - 1)) != 0) {
177 // Base not aligned, retry
178 if (!os::release_memory(base, size)) fatal("os::release_memory failed");
179 // Make sure that size is aligned
180 size = align_size_up(size, alignment);
181 base = os::reserve_memory_aligned(size, alignment);
182
183 if (requested_address != 0 &&
184 failed_to_reserve_as_requested(base, requested_address, size, false)) {
185 // As a result of the alignment constraints, the allocated base differs
186 // from the requested address. Return back to the caller who can
187 // take remedial action (like try again without a requested address).
188 assert(_base == NULL, "should be");
189 return;
190 }
191 }
192 }
193 // Done
194 _base = base;
195 _size = size;
196 _alignment = alignment;
197 }
198
199
200 ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
201 bool special, bool executable) {
202 assert((size % os::vm_allocation_granularity()) == 0,
203 "size not allocation aligned");
204 _base = base;
205 _size = size;
206 _alignment = alignment;
207 _noaccess_prefix = 0;
208 _special = special;
209 _executable = executable;
210 }
211
212
301 _size -= _noaccess_prefix;
302 assert(((uintptr_t)_base % _alignment == 0), "must be exactly of required alignment");
303 }
304
// Tries to allocate memory of size 'size' at address requested_address with alignment 'alignment'.
// Does not check whether the reserved memory actually is at requested_address, as the memory returned
// might still fulfill the wishes of the caller.
// Assures the memory is aligned to 'alignment'.
// NOTE: If ReservedHeapSpace already points to some reserved memory this is freed, first.
// On failure the space ends up unreserved; the callers' retry loops test
// _base == NULL to decide whether to try another attach point.
void ReservedHeapSpace::try_reserve_heap(size_t size,
                                         size_t alignment,
                                         bool large,
                                         char* requested_address) {
  if (_base != NULL) {
    // We tried before, but we didn't like the address delivered.
    release();
  }

  // If OS doesn't support demand paging for large page memory, we need
  // to use reserve_memory_special() to reserve and pin the entire region.
  bool special = large && !os::can_commit_large_page_memory();
  char* base = NULL;

  if (PrintCompressedOopsMode && Verbose) {
    tty->print("Trying to allocate at address " PTR_FORMAT " heap of size " SIZE_FORMAT_HEX ".\n",
               p2i(requested_address), size);
  }

  if (special) {
    // Pinned large-page reservation; the heap is never executable, hence 'false'.
    base = os::reserve_memory_special(size, alignment, requested_address, false);

    if (base != NULL) {
      // Check alignment constraints.
      assert((uintptr_t) base % alignment == 0,
             "Large pages returned a non-aligned address, base: "
             PTR_FORMAT " alignment: " SIZE_FORMAT_HEX,
             p2i(base), alignment);
      // Record that this region is pinned large-page memory — presumably the
      // commit/release paths treat _special regions differently; confirm there.
      _special = true;
    }
  }

  if (base == NULL) {
    // Failed; try to reserve regular memory below.
    // Log the fallback only when the user explicitly requested large pages.
    if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
                          !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
      if (PrintCompressedOopsMode) {
        tty->cr();
        tty->print_cr("Reserve regular memory without large pages.");
      }
    }

    // Optimistically assume that the OS returns an aligned base pointer.
    // When reserving a large address range, most OSes seem to align to at
    // least 64K.

    // If the memory was requested at a particular address, use
    // os::attempt_reserve_memory_at() to avoid over mapping something
    // important. If available space is not detected, return NULL.

    if (requested_address != 0) {
      base = os::attempt_reserve_memory_at(size, requested_address);
    } else {
      base = os::reserve_memory(size, NULL, alignment);
    }
  }
  if (base == NULL) { return; }

  // Done
  // Fields must be set before the alignment check below so that release()
  // can free exactly the region just reserved.
  _base = base;
  _size = size;
  _alignment = alignment;

  // Check alignment constraints
  if ((((size_t)base) & (alignment - 1)) != 0) {
    // Base not aligned, retry.
    // release() frees the reservation and presumably resets _base to NULL
    // (the callers' retry loops rely on that) — confirm in release().
    release();
  }
}
379
380 void ReservedHeapSpace::try_reserve_range(char *highest_start,
381 char *lowest_start,
382 size_t attach_point_alignment,
383 char *aligned_heap_base_min_address,
384 char *upper_bound,
385 size_t size,
549 while (addresses[i] && // End of array not yet reached.
550 ((_base == NULL) || // No previous try succeeded.
551 (_base + size > (char *)OopEncodingHeapMax && // Not zerobased or unscaled address.
552 !Universe::is_disjoint_heap_base_address((address)_base)))) { // Not disjoint address.
553 char* const attach_point = addresses[i];
554 assert(attach_point >= aligned_heap_base_min_address, "Flag support broken");
555 try_reserve_heap(size + noaccess_prefix, alignment, large, attach_point);
556 i++;
557 }
558
559 // Last, desperate try without any placement.
560 if (_base == NULL) {
561 if (PrintCompressedOopsMode && Verbose) {
562 tty->print("Trying to allocate at address NULL heap of size " SIZE_FORMAT_HEX ".\n", size + noaccess_prefix);
563 }
564 initialize(size + noaccess_prefix, alignment, large, NULL, false);
565 }
566 }
567 }
568
569 ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, bool large) : ReservedSpace() {
570
571 if (size == 0) {
572 return;
573 }
574
575 // Heap size should be aligned to alignment, too.
576 guarantee(is_size_aligned(size, alignment), "set by caller");
577
578 if (UseCompressedOops) {
579 initialize_compressed_heap(size, alignment, large);
580 if (_size > size) {
581 // We allocated heap with noaccess prefix.
582 // It can happen we get a zerobased/unscaled heap with noaccess prefix,
583 // if we had to try at arbitrary address.
584 establish_noaccess_prefix();
585 }
586 } else {
587 initialize(size, alignment, large, NULL, false);
588 }
589
590 assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
591 "area must be distinguishable from marks for mark-sweep");
592 assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
593 "area must be distinguishable from marks for mark-sweep");
594
|
104 "size not aligned to os::vm_allocation_granularity()");
105 assert((alignment & (granularity - 1)) == 0,
106 "alignment not aligned to os::vm_allocation_granularity()");
107 assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
108 "not a power of 2");
109
110 alignment = MAX2(alignment, (size_t)os::vm_page_size());
111
112 _base = NULL;
113 _size = 0;
114 _special = false;
115 _executable = executable;
116 _alignment = 0;
117 _noaccess_prefix = 0;
118 if (size == 0) {
119 return;
120 }
121
122 // If OS doesn't support demand paging for large page memory, we need
123 // to use reserve_memory_special() to reserve and pin the entire region.
124 // If there is a backing file directory for this VirtualSpace then whether largepages are allocated is upto the filesystem the dir resides in.
125 // So we ignore the UseLargePages flag in this case.
126 bool special = (_backingFileDir == NULL) && (large && !os::can_commit_large_page_memory());
127 char* base = NULL;
128
129 if (special) {
130
131 base = os::reserve_memory_special(size, alignment, requested_address, executable);
132
133 if (base != NULL) {
134 if (failed_to_reserve_as_requested(base, requested_address, size, true)) {
135 // OS ignored requested address. Try different address.
136 return;
137 }
138 // Check alignment constraints.
139 assert((uintptr_t) base % alignment == 0,
140 "Large pages returned a non-aligned address, base: "
141 PTR_FORMAT " alignment: " SIZE_FORMAT_HEX,
142 p2i(base), alignment);
143 _special = true;
144 } else {
145 // failed; try to reserve regular memory below
146 if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
147 !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
148 if (PrintCompressedOopsMode) {
149 tty->cr();
150 tty->print_cr("Reserve regular memory without large pages.");
151 }
152 }
153 }
154 }
155
156 if (base == NULL) {
157
158 // If '_backingFileDir' variable is not null, the memory should be backed by a file.
159     // The code path taken in the 'if' block below is very similar to the 'else' block, with the only difference being that it uses a file descriptor (fd) for mmap() calls.
160 // reserve_memory_with_backing_file() encapsulates the different cases which are handled in the 'else' block
161 if (_backingFileDir != NULL) {
162 base = os::reserve_memory_with_backing_file(size, requested_address, alignment, _backingFileDir);
163
164 if (requested_address != 0 && failed_to_reserve_as_requested(base, requested_address, size, false)) {
165 // OS ignored requested address. Try different address.
166 base = NULL;
167 return;
168 }
169 }
170 else {
171       // Optimistically assume that the OS returns an aligned base pointer.
172 // When reserving a large address range, most OSes seem to align to at
173 // least 64K.
174
175 // If the memory was requested at a particular address, use
176 // os::attempt_reserve_memory_at() to avoid over mapping something
177 // important. If available space is not detected, return NULL.
178 if (requested_address != 0) {
179 base = os::attempt_reserve_memory_at(size, requested_address);
180 if (failed_to_reserve_as_requested(base, requested_address, size, false)) {
181 // OS ignored requested address. Try different address.
182 base = NULL;
183 }
184 } else {
185 base = os::reserve_memory(size, NULL, alignment);
186 }
187
188 if (base == NULL) return;
189
190 // Check alignment constraints
191 if ((((size_t)base) & (alignment - 1)) != 0) {
192 // Base not aligned, retry
193 if (!os::release_memory(base, size)) fatal("os::release_memory failed");
194 // Make sure that size is aligned
195 size = align_size_up(size, alignment);
196 base = os::reserve_memory_aligned(size, alignment);
197
198 if (requested_address != 0 &&
199 failed_to_reserve_as_requested(base, requested_address, size, false)) {
200 // As a result of the alignment constraints, the allocated base differs
201 // from the requested address. Return back to the caller who can
202 // take remedial action (like try again without a requested address).
203 assert(_base == NULL, "should be");
204 return;
205 }
206 }
207 }
208 }
209 // Done
210 _base = base;
211 _size = size;
212 _alignment = alignment;
213 }
214
215
216 ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
217 bool special, bool executable) {
218 assert((size % os::vm_allocation_granularity()) == 0,
219 "size not allocation aligned");
220 _base = base;
221 _size = size;
222 _alignment = alignment;
223 _noaccess_prefix = 0;
224 _special = special;
225 _executable = executable;
226 }
227
228
317 _size -= _noaccess_prefix;
318 assert(((uintptr_t)_base % _alignment == 0), "must be exactly of required alignment");
319 }
320
// Tries to allocate memory of size 'size' at address requested_address with alignment 'alignment'.
// Does not check whether the reserved memory actually is at requested_address, as the memory returned
// might still fulfill the wishes of the caller.
// Assures the memory is aligned to 'alignment'.
// NOTE: If ReservedHeapSpace already points to some reserved memory this is freed, first.
// On failure the space ends up unreserved; the callers' retry loops test
// _base == NULL to decide whether to try another attach point.
void ReservedHeapSpace::try_reserve_heap(size_t size,
                                         size_t alignment,
                                         bool large,
                                         char* requested_address) {
  if (_base != NULL) {
    // We tried before, but we didn't like the address delivered.
    release();
  }

  // If OS doesn't support demand paging for large page memory, we need
  // to use reserve_memory_special() to reserve and pin the entire region.
  // A file-backed heap (_backingFileDir != NULL) never takes the special path:
  // large-page behavior is then up to the filesystem backing the directory.
  bool special = _backingFileDir == NULL && large && !os::can_commit_large_page_memory();
  char* base = NULL;

  if (PrintCompressedOopsMode && Verbose) {
    tty->print("Trying to allocate at address " PTR_FORMAT " heap of size " SIZE_FORMAT_HEX ".\n",
               p2i(requested_address), size);
  }

  if (special) {
    // Pinned large-page reservation; the heap is never executable, hence 'false'.
    base = os::reserve_memory_special(size, alignment, requested_address, false);

    if (base != NULL) {
      // Check alignment constraints.
      assert((uintptr_t) base % alignment == 0,
             "Large pages returned a non-aligned address, base: "
             PTR_FORMAT " alignment: " SIZE_FORMAT_HEX,
             p2i(base), alignment);
      _special = true;
    }
  }

  if (base == NULL) {
    // Failed; try to reserve regular memory below.
    // Log the fallback only when the user explicitly requested large pages.
    if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
                          !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
      if (PrintCompressedOopsMode) {
        tty->cr();
        tty->print_cr("Reserve regular memory without large pages.");
      }
    }

    // If '_backingFileDir' variable is not null, the memory should be backed by a file.
    // The code path taken in the 'if' block below is very similar to the 'else' block,
    // with the only difference being that it uses a file descriptor (fd) for mmap() calls.
    // reserve_memory_with_backing_file() encapsulates the two cases which are handled in the 'else' block.
    if (_backingFileDir != NULL) {
      base = os::reserve_memory_with_backing_file(size, requested_address, alignment, _backingFileDir);
      if (requested_address != 0 && failed_to_reserve_as_requested(base, requested_address, size, false)) {
        // OS ignored requested address. Try different address.
        base = NULL;  // redundant right before the return; kept for symmetry with the non-file path
        return;
      }
      // NOTE(review): the file-backed mapping is flagged _special, apparently so
      // later commit/uncommit/release paths treat it as pre-committed — confirm
      // this matches how those paths handle _special regions.
      _special = true;
    }
    else {

      // Optimistically assume that the OS returns an aligned base pointer.
      // When reserving a large address range, most OSes seem to align to at
      // least 64K.

      // If the memory was requested at a particular address, use
      // os::attempt_reserve_memory_at() to avoid over mapping something
      // important. If available space is not detected, return NULL.

      if (requested_address != 0) {
        base = os::attempt_reserve_memory_at(size, requested_address);
      } else {
        base = os::reserve_memory(size, NULL, alignment);
      }
    }
  }
  if (base == NULL) { return; }

  // Done
  // Fields must be set before the alignment check below so that release()
  // can free exactly the region just reserved.
  _base = base;
  _size = size;
  _alignment = alignment;

  // Check alignment constraints
  if ((((size_t)base) & (alignment - 1)) != 0) {
    // Base not aligned, retry.
    // release() frees the reservation and presumably resets _base to NULL
    // (the callers' retry loops rely on that) — confirm in release().
    release();
  }
}
410
411 void ReservedHeapSpace::try_reserve_range(char *highest_start,
412 char *lowest_start,
413 size_t attach_point_alignment,
414 char *aligned_heap_base_min_address,
415 char *upper_bound,
416 size_t size,
580 while (addresses[i] && // End of array not yet reached.
581 ((_base == NULL) || // No previous try succeeded.
582 (_base + size > (char *)OopEncodingHeapMax && // Not zerobased or unscaled address.
583 !Universe::is_disjoint_heap_base_address((address)_base)))) { // Not disjoint address.
584 char* const attach_point = addresses[i];
585 assert(attach_point >= aligned_heap_base_min_address, "Flag support broken");
586 try_reserve_heap(size + noaccess_prefix, alignment, large, attach_point);
587 i++;
588 }
589
590 // Last, desperate try without any placement.
591 if (_base == NULL) {
592 if (PrintCompressedOopsMode && Verbose) {
593 tty->print("Trying to allocate at address NULL heap of size " SIZE_FORMAT_HEX ".\n", size + noaccess_prefix);
594 }
595 initialize(size + noaccess_prefix, alignment, large, NULL, false);
596 }
597 }
598 }
599
600 ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, bool large, const char* backingFSforHeap) : ReservedSpace() {
601
602 if (size == 0) {
603 return;
604 }
605 _backingFileDir= backingFSforHeap;
606 // Heap size should be aligned to alignment, too.
607 guarantee(is_size_aligned(size, alignment), "set by caller");
608
609 if (UseCompressedOops) {
610 initialize_compressed_heap(size, alignment, large);
611 if (_size > size) {
612 // We allocated heap with noaccess prefix.
613 // It can happen we get a zerobased/unscaled heap with noaccess prefix,
614 // if we had to try at arbitrary address.
615 establish_noaccess_prefix();
616 }
617 } else {
618 initialize(size, alignment, large, NULL, false);
619 }
620
621 assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
622 "area must be distinguishable from marks for mark-sweep");
623 assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
624 "area must be distinguishable from marks for mark-sweep");
625
|