
src/share/vm/memory/virtualspace.cpp (old)

  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "code/codeCacheExtensions.hpp"
  27 #include "logging/log.hpp"
  28 #include "memory/resourceArea.hpp"
  29 #include "memory/virtualspace.hpp"
  30 #include "oops/markOop.hpp"
  31 #include "oops/oop.inline.hpp"
  32 #include "services/memTracker.hpp"
  33 
  34 // ReservedSpace
  35 
  36 // Dummy constructor
  37 ReservedSpace::ReservedSpace() : _base(NULL), _size(0), _noaccess_prefix(0),
  38     _alignment(0), _special(false), _executable(false) {
  39 }
  40 
  41 ReservedSpace::ReservedSpace(size_t size, size_t preferred_page_size) {
  42   bool has_preferred_page_size = preferred_page_size != 0;
  43   // Want to use large pages where possible and pad with small pages.
  44   size_t page_size = has_preferred_page_size ? preferred_page_size : os::page_size_for_region_unaligned(size, 1);
  45   bool large_pages = page_size != (size_t)os::vm_page_size();
  46   size_t alignment;
  47   if (large_pages && has_preferred_page_size) {
  48     alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
  49     // ReservedSpace initialization requires size to be aligned to the given
  50     // alignment. Align the size up.
  51     size = align_size_up(size, alignment);
  52   } else {
  53     // Don't force the alignment to be large page aligned,
  54     // since that will waste memory.
  55     alignment = os::vm_allocation_granularity();
  56   }
  57   initialize(size, alignment, large_pages, NULL, false);
  58 }
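
As an aside on the arithmetic above: a minimal standalone sketch of the page-size/alignment decision, with align_size_up re-implemented under its usual power-of-two assumption and the sizes invented for illustration:

    #include <cstddef>
    #include <cstdio>

    // Round size up to a multiple of alignment (a power of two); this mirrors
    // what align_size_up is assumed to do.
    static size_t align_up(size_t size, size_t alignment) {
      return (size + alignment - 1) & ~(alignment - 1);
    }

    int main() {
      const size_t preferred_page_size = 2 * 1024 * 1024;  // hypothetical 2M large page
      const size_t granularity         = 64 * 1024;        // hypothetical vm_allocation_granularity
      size_t size = 5 * 1000 * 1000;                       // caller-requested size

      // Same decision as the constructor: align to the larger of the two.
      size_t alignment = preferred_page_size > granularity ? preferred_page_size : granularity;
      size = align_up(size, alignment);
      printf("alignment=%zu size=%zu\n", alignment, size); // size rounds up to 6291456
      return 0;
    }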
  59 
  60 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
  61                              bool large,
  62                              char* requested_address) {
  63   initialize(size, alignment, large, requested_address, false);
  64 }
  65 
  66 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
  67                              bool large,
  68                              bool executable) {
  69   initialize(size, alignment, large, NULL, executable);
  70 }
  71 
  72 // Helper method.
  73 static bool failed_to_reserve_as_requested(char* base, char* requested_address,
  74                                            const size_t size, bool special)
  75 {
  76   if (base == requested_address || requested_address == NULL)
  77     return false; // did not fail
  78 
  79   if (base != NULL) {
   80     // A different reserve address may be acceptable in other cases
   81     // but for compressed oops the heap should be at the requested address.
  82     assert(UseCompressedOops, "currently requested address used only for compressed oops");
  83     log_debug(gc, heap, coops)("Reserved memory not at requested address: " PTR_FORMAT " vs " PTR_FORMAT, p2i(base), p2i(requested_address));
  84     // OS ignored requested address. Try different address.
  85     if (special) {
  86       if (!os::release_memory_special(base, size)) {
  87         fatal("os::release_memory_special failed");
  88       }


 103          "size not aligned to os::vm_allocation_granularity()");
 104   assert((alignment & (granularity - 1)) == 0,
 105          "alignment not aligned to os::vm_allocation_granularity()");
 106   assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
 107          "not a power of 2");
 108 
 109   alignment = MAX2(alignment, (size_t)os::vm_page_size());
 110 
 111   _base = NULL;
 112   _size = 0;
 113   _special = false;
 114   _executable = executable;
 115   _alignment = 0;
 116   _noaccess_prefix = 0;
 117   if (size == 0) {
 118     return;
 119   }
 120 
  121   // If the OS doesn't support demand paging for large page memory, we need
 122   // to use reserve_memory_special() to reserve and pin the entire region.
 123   bool special = large && !os::can_commit_large_page_memory();
 124   char* base = NULL;
 125 
 126   if (special) {
 127 
 128     base = os::reserve_memory_special(size, alignment, requested_address, executable);
 129 
 130     if (base != NULL) {
 131       if (failed_to_reserve_as_requested(base, requested_address, size, true)) {
 132         // OS ignored requested address. Try different address.
 133         return;
 134       }
 135       // Check alignment constraints.
 136       assert((uintptr_t) base % alignment == 0,
 137              "Large pages returned a non-aligned address, base: "
 138              PTR_FORMAT " alignment: " SIZE_FORMAT_HEX,
 139              p2i(base), alignment);
 140       _special = true;
 141     } else {
 142       // failed; try to reserve regular memory below
 143       if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 144                             !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 145         log_debug(gc, heap, coops)("Reserve regular memory without large pages");
 146       }
 147     }
 148   }
 149 
 150   if (base == NULL) {
  151     // Optimistically assume that the OS returns an aligned base pointer.
 152     // When reserving a large address range, most OSes seem to align to at
 153     // least 64K.
 154 
 155     // If the memory was requested at a particular address, use
  156     // os::attempt_reserve_memory_at() to avoid mapping over something
 157     // important.  If available space is not detected, return NULL.
 158 
 159     if (requested_address != 0) {
 160       base = os::attempt_reserve_memory_at(size, requested_address);
 161       if (failed_to_reserve_as_requested(base, requested_address, size, false)) {
 162         // OS ignored requested address. Try different address.
 163         base = NULL;
 164       }
 165     } else {
 166       base = os::reserve_memory(size, NULL, alignment);
 167     }
 168 
 169     if (base == NULL) return;
 170 
 171     // Check alignment constraints
 172     if ((((size_t)base) & (alignment - 1)) != 0) {
 173       // Base not aligned, retry
 174       if (!os::release_memory(base, size)) fatal("os::release_memory failed");
 175       // Make sure that size is aligned
 176       size = align_size_up(size, alignment);
 177       base = os::reserve_memory_aligned(size, alignment);
 178 
 179       if (requested_address != 0 &&
 180           failed_to_reserve_as_requested(base, requested_address, size, false)) {
 181         // As a result of the alignment constraints, the allocated base differs
  182         // from the requested address. Return to the caller, who can
  183         // take remedial action (like trying again without a requested address).
 184         assert(_base == NULL, "should be");
 185         return;
 186       }
 187     }
 188   }
 189   // Done
 190   _base = base;
 191   _size = size;
 192   _alignment = alignment;
 193 }
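
The retry path above leans on os::reserve_memory_aligned() to produce an aligned base. A common way to get that effect, shown here as a hedged POSIX sketch rather than what any particular os:: port actually does, is to over-reserve by the alignment and release the slack on both sides:

    #include <sys/mman.h>
    #include <stdint.h>
    #include <stddef.h>

    // Reserve 'size' bytes aligned to 'alignment' (a power of two) by mapping
    // size + alignment bytes, then unmapping the unaligned head and tail.
    static char* reserve_aligned(size_t size, size_t alignment) {
      size_t extra = size + alignment;
      char* raw = (char*) mmap(NULL, extra, PROT_NONE,
                               MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
      if (raw == MAP_FAILED) return NULL;

      char* aligned = (char*) (((uintptr_t) raw + alignment - 1) & ~(uintptr_t) (alignment - 1));
      size_t head = (size_t) (aligned - raw);   // slack before the aligned base
      size_t tail = extra - head - size;        // slack after the aligned end
      if (head > 0) munmap(raw, head);
      if (tail > 0) munmap(aligned + size, tail);
      return aligned;
    }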
 194 
 195 
 196 ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
 197                              bool special, bool executable) {
 198   assert((size % os::vm_allocation_granularity()) == 0,
 199          "size not allocation aligned");
 200   _base = base;
 201   _size = size;
 202   _alignment = alignment;
 203   _noaccess_prefix = 0;
 204   _special = special;
 205   _executable = executable;
 206 }
 207 
 208 
 209 ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment,
 210                                         bool split, bool realloc) {
 211   assert(partition_size <= size(), "partition failed");
 212   if (split) {


 296   _size -= _noaccess_prefix;
 297   assert(((uintptr_t)_base % _alignment == 0), "must be exactly of required alignment");
 298 }
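
For context on the _noaccess_prefix bookkeeping above: with heap-based compressed oops, the first pages at the heap base are protected so that decoding a narrow oop of 0 (heap base + 0) traps instead of silently reading memory. A minimal sketch of that protection step, using raw mprotect rather than the os:: wrappers:

    #include <sys/mman.h>
    #include <stdio.h>
    #include <stdlib.h>

    // Make the first 'prefix' bytes of a reserved range inaccessible, so a
    // compressed-oop decode of NULL faults immediately.
    static void protect_noaccess_prefix(char* base, size_t prefix) {
      if (mprotect(base, prefix, PROT_NONE) != 0) {
        perror("mprotect");
        abort();
      }
    }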
 299 
 300 // Tries to allocate memory of size 'size' at address requested_address with alignment 'alignment'.
 301 // Does not check whether the reserved memory actually is at requested_address, as the memory returned
 302 // might still fulfill the wishes of the caller.
  303 // Ensures the memory is aligned to 'alignment'.
  304 // NOTE: If ReservedHeapSpace already points to some reserved memory, this is freed first.
 305 void ReservedHeapSpace::try_reserve_heap(size_t size,
 306                                          size_t alignment,
 307                                          bool large,
 308                                          char* requested_address) {
 309   if (_base != NULL) {
 310     // We tried before, but we didn't like the address delivered.
 311     release();
 312   }
 313 
  314   // If the OS doesn't support demand paging for large page memory, we need
 315   // to use reserve_memory_special() to reserve and pin the entire region.
 316   bool special = large && !os::can_commit_large_page_memory();
 317   char* base = NULL;
 318 
 319   log_trace(gc, heap, coops)("Trying to allocate at address " PTR_FORMAT
 320                              " heap of size " SIZE_FORMAT_HEX,
 321                              p2i(requested_address),
 322                              size);
 323 
 324   if (special) {
 325     base = os::reserve_memory_special(size, alignment, requested_address, false);
 326 
 327     if (base != NULL) {
 328       // Check alignment constraints.
 329       assert((uintptr_t) base % alignment == 0,
 330              "Large pages returned a non-aligned address, base: "
 331              PTR_FORMAT " alignment: " SIZE_FORMAT_HEX,
 332              p2i(base), alignment);
 333       _special = true;
 334     }
 335   }
 336 
 337   if (base == NULL) {
 338     // Failed; try to reserve regular memory below
 339     if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 340                           !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 341       log_debug(gc, heap, coops)("Reserve regular memory without large pages");
 342     }
 343 
  344     // Optimistically assume that the OS returns an aligned base pointer.
 345     // When reserving a large address range, most OSes seem to align to at
 346     // least 64K.
 347 
 348     // If the memory was requested at a particular address, use
  349     // os::attempt_reserve_memory_at() to avoid mapping over something
 350     // important.  If available space is not detected, return NULL.
 351 
 352     if (requested_address != 0) {
 353       base = os::attempt_reserve_memory_at(size, requested_address);
 354     } else {
 355       base = os::reserve_memory(size, NULL, alignment);
 356     }
 357   }
 358   if (base == NULL) { return; }
 359 
 360   // Done
 361   _base = base;
 362   _size = size;
 363   _alignment = alignment;
 364 
 365   // Check alignment constraints
 366   if ((((size_t)base) & (alignment - 1)) != 0) {
 367     // Base not aligned, retry.
 368     release();
 369   }
 370 }
 371 
 372 void ReservedHeapSpace::try_reserve_range(char *highest_start,
 373                                           char *lowest_start,
 374                                           size_t attach_point_alignment,
 375                                           char *aligned_heap_base_min_address,
 376                                           char *upper_bound,
 377                                           size_t size,
 378                                           size_t alignment,
 379                                           bool large) {
 380   const size_t attach_range = highest_start - lowest_start;
  381   // Cap num_attempts at the number of possible attach points.
  382   // At least one attempt is possible even for a zero-sized attach range.
 383   const uint64_t num_attempts_possible = (attach_range / attach_point_alignment) + 1;


 539     char** addresses = get_attach_addresses_for_disjoint_mode();
 540     int i = 0;
 541     while (addresses[i] &&                                 // End of array not yet reached.
 542            ((_base == NULL) ||                             // No previous try succeeded.
 543             (_base + size >  (char *)OopEncodingHeapMax && // Not zerobased or unscaled address.
 544              !Universe::is_disjoint_heap_base_address((address)_base)))) {  // Not disjoint address.
 545       char* const attach_point = addresses[i];
 546       assert(attach_point >= aligned_heap_base_min_address, "Flag support broken");
 547       try_reserve_heap(size + noaccess_prefix, alignment, large, attach_point);
 548       i++;
 549     }
 550 
 551     // Last, desperate try without any placement.
 552     if (_base == NULL) {
 553       log_trace(gc, heap, coops)("Trying to allocate at address NULL heap of size " SIZE_FORMAT_HEX, size + noaccess_prefix);
 554       initialize(size + noaccess_prefix, alignment, large, NULL, false);
 555     }
 556   }
 557 }
 558 
 559 ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, bool large) : ReservedSpace() {
 560 
 561   if (size == 0) {
 562     return;
 563   }
 564 
 565   // Heap size should be aligned to alignment, too.
 566   guarantee(is_size_aligned(size, alignment), "set by caller");
 567 
 568   if (UseCompressedOops) {
 569     initialize_compressed_heap(size, alignment, large);
 570     if (_size > size) {
 571       // We allocated heap with noaccess prefix.
 572       // It can happen we get a zerobased/unscaled heap with noaccess prefix,
 573       // if we had to try at arbitrary address.
 574       establish_noaccess_prefix();
 575     }
 576   } else {
 577     initialize(size, alignment, large, NULL, false);
 578   }
 579 
 580   assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
 581          "area must be distinguishable from marks for mark-sweep");
 582   assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
 583          "area must be distinguishable from marks for mark-sweep");
 584 
 585   if (base() > 0) {
 586     MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
 587   }
 588 }
 589 
  590 // Reserve space for the code segment.  Same as the Java heap, except we
  591 // mark this as executable.
 592 ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
 593                                      size_t rs_align,
 594                                      bool large) :
 595   ReservedSpace(r_size, rs_align, large, /*executable*/ CodeCacheExtensions::support_dynamic_code()) {
 596   MemTracker::record_virtual_memory_type((address)base(), mtCode);
 597 }
 598 
 599 // VirtualSpace
 600 
 601 VirtualSpace::VirtualSpace() {
 602   _low_boundary           = NULL;
 603   _high_boundary          = NULL;
 604   _low                    = NULL;
 605   _high                   = NULL;
 606   _lower_high             = NULL;


src/share/vm/memory/virtualspace.cpp (new)

  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "code/codeCacheExtensions.hpp"
  27 #include "logging/log.hpp"
  28 #include "memory/resourceArea.hpp"
  29 #include "memory/virtualspace.hpp"
  30 #include "oops/markOop.hpp"
  31 #include "oops/oop.inline.hpp"
  32 #include "services/memTracker.hpp"
  33 
  34 // ReservedSpace
  35 
  36 // Dummy constructor
  37 ReservedSpace::ReservedSpace() : _base(NULL), _size(0), _noaccess_prefix(0),
  38     _alignment(0), _special(false), _executable(false), _backing_fd(-1) {
  39 }
  40 
  41 ReservedSpace::ReservedSpace(size_t size, size_t preferred_page_size) : _backing_fd(-1) {
  42   bool has_preferred_page_size = preferred_page_size != 0;
  43   // Want to use large pages where possible and pad with small pages.
  44   size_t page_size = has_preferred_page_size ? preferred_page_size : os::page_size_for_region_unaligned(size, 1);
  45   bool large_pages = page_size != (size_t)os::vm_page_size();
  46   size_t alignment;
  47   if (large_pages && has_preferred_page_size) {
  48     alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
  49     // ReservedSpace initialization requires size to be aligned to the given
  50     // alignment. Align the size up.
  51     size = align_size_up(size, alignment);
  52   } else {
  53     // Don't force the alignment to be large page aligned,
  54     // since that will waste memory.
  55     alignment = os::vm_allocation_granularity();
  56   }
  57   initialize(size, alignment, large_pages, NULL, false);
  58 }
  59 
  60 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
  61                              bool large,
  62                              char* requested_address) : _backing_fd(-1) {
  63   initialize(size, alignment, large, requested_address, false);
  64 }
  65 
  66 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
  67                              bool large,
  68                              bool executable) : _backing_fd(-1) {
  69   initialize(size, alignment, large, NULL, executable);
  70 }
  71 
  72 // Helper method.
  73 static bool failed_to_reserve_as_requested(char* base, char* requested_address,
  74                                            const size_t size, bool special)
  75 {
  76   if (base == requested_address || requested_address == NULL)
  77     return false; // did not fail
  78 
  79   if (base != NULL) {
   80     // A different reserve address may be acceptable in other cases
   81     // but for compressed oops the heap should be at the requested address.
  82     assert(UseCompressedOops, "currently requested address used only for compressed oops");
  83     log_debug(gc, heap, coops)("Reserved memory not at requested address: " PTR_FORMAT " vs " PTR_FORMAT, p2i(base), p2i(requested_address));
  84     // OS ignored requested address. Try different address.
  85     if (special) {
  86       if (!os::release_memory_special(base, size)) {
  87         fatal("os::release_memory_special failed");
  88       }


 103          "size not aligned to os::vm_allocation_granularity()");
 104   assert((alignment & (granularity - 1)) == 0,
 105          "alignment not aligned to os::vm_allocation_granularity()");
 106   assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
 107          "not a power of 2");
 108 
 109   alignment = MAX2(alignment, (size_t)os::vm_page_size());
 110 
 111   _base = NULL;
 112   _size = 0;
 113   _special = false;
 114   _executable = executable;
 115   _alignment = 0;
 116   _noaccess_prefix = 0;
 117   if (size == 0) {
 118     return;
 119   }
 120 
  121   // If the OS doesn't support demand paging for large page memory, we need
  122   // to use reserve_memory_special() to reserve and pin the entire region.
  123   // With a backing file directory, whether large pages are used is up to the filesystem the directory resides in.
  124   // So we ignore the UseLargePages flag in this case.
 125   bool special = (_backing_fd == -1) && (large && !os::can_commit_large_page_memory());
 126   char* base = NULL;
 127 
 128   if (special) {
 129 
 130     base = os::reserve_memory_special(size, alignment, requested_address, executable);
 131 
 132     if (base != NULL) {
 133       if (failed_to_reserve_as_requested(base, requested_address, size, true)) {
 134         // OS ignored requested address. Try different address.
 135         return;
 136       }
 137       // Check alignment constraints.
 138       assert((uintptr_t) base % alignment == 0,
 139              "Large pages returned a non-aligned address, base: "
 140              PTR_FORMAT " alignment: " SIZE_FORMAT_HEX,
 141              p2i(base), alignment);
 142       _special = true;
 143     } else {
 144       // failed; try to reserve regular memory below
 145       if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 146                             !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 147         log_debug(gc, heap, coops)("Reserve regular memory without large pages");
 148       }
 149     }
 150   }
 151 
 152   if (base == NULL) {
  153     // Optimistically assume that the OS returns an aligned base pointer.
 154     // When reserving a large address range, most OSes seem to align to at
 155     // least 64K.
 156 
 157     // If the memory was requested at a particular address, use
  158     // os::attempt_reserve_memory_at() to avoid mapping over something
 159     // important.  If available space is not detected, return NULL.
 160 
 161     if (requested_address != 0) {
 162       base = os::attempt_reserve_memory_at(size, requested_address, _backing_fd);
 163       if (failed_to_reserve_as_requested(base, requested_address, size, false)) {
 164         // OS ignored requested address. Try different address.
 165         base = NULL;
 166       }
 167     } else {
 168       base = os::reserve_memory(size, NULL, alignment, _backing_fd);
 169     }
 170 
 171     if (base == NULL) return;
 172 
 173     // Check alignment constraints
 174     if ((((size_t)base) & (alignment - 1)) != 0) {
 175       // Base not aligned, retry
 176       if (_backing_fd != -1) {
  177         if (!os::unmap_memory(base, size)) fatal("os::unmap_memory failed");
 178       }
 179       else {
 180         if (!os::release_memory(base, size)) fatal("os::release_memory failed");
 181       }
 182       // Make sure that size is aligned
 183       size = align_size_up(size, alignment);
 184       base = os::reserve_memory_aligned(size, alignment, _backing_fd);
 185 
 186       if (requested_address != 0 &&
 187           failed_to_reserve_as_requested(base, requested_address, size, false)) {
 188         // As a result of the alignment constraints, the allocated base differs
  189         // from the requested address. Return to the caller, who can
  190         // take remedial action (like trying again without a requested address).
 191         assert(_base == NULL, "should be");
 192         return;
 193       }
 194     }
 195   }
 196   // Done
 197   _base = base;
 198   _size = size;
 199   _alignment = alignment;
  200   // If the heap is reserved with a backing file, the entire space has been committed, so set the _special flag to true.
 201   if (_backing_fd != -1) {
 202     _special = true;
 203   }
 204 }
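
The _special = true above reflects that a file-backed reservation is already committed by the mapping itself. How os::create_file_for_heap() and the fd-taking reserve_memory() behave is platform code not shown in this file; a hedged POSIX sketch of what the pair plausibly amounts to (the naming scheme and error handling are invented):

    #include <sys/mman.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <stdio.h>
    #include <stdlib.h>

    // Create a heap-sized file in 'dir' and map it MAP_SHARED, so heap stores
    // are backed by the file. Returns the mapped base, or NULL on failure.
    static char* reserve_file_backed(const char* dir, size_t size, int* fd_out) {
      char path[4096];
      snprintf(path, sizeof(path), "%s/heap_XXXXXX", dir);
      int fd = mkstemp(path);                // hypothetical naming scheme
      if (fd == -1) return NULL;
      unlink(path);                          // file lives only as long as the fd/mapping
      if (ftruncate(fd, (off_t) size) != 0) { close(fd); return NULL; }

      char* base = (char*) mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
      if (base == MAP_FAILED) { close(fd); return NULL; }
      *fd_out = fd;
      return base;
    }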
 205 
 206 
 207 ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
 208                              bool special, bool executable) {
 209   assert((size % os::vm_allocation_granularity()) == 0,
 210          "size not allocation aligned");
 211   _base = base;
 212   _size = size;
 213   _alignment = alignment;
 214   _noaccess_prefix = 0;
 215   _special = special;
 216   _executable = executable;
 217 }
 218 
 219 
 220 ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment,
 221                                         bool split, bool realloc) {
 222   assert(partition_size <= size(), "partition failed");
 223   if (split) {


 307   _size -= _noaccess_prefix;
 308   assert(((uintptr_t)_base % _alignment == 0), "must be exactly of required alignment");
 309 }
 310 
 311 // Tries to allocate memory of size 'size' at address requested_address with alignment 'alignment'.
 312 // Does not check whether the reserved memory actually is at requested_address, as the memory returned
 313 // might still fulfill the wishes of the caller.
  314 // Ensures the memory is aligned to 'alignment'.
  315 // NOTE: If ReservedHeapSpace already points to some reserved memory, this is freed first.
 316 void ReservedHeapSpace::try_reserve_heap(size_t size,
 317                                          size_t alignment,
 318                                          bool large,
 319                                          char* requested_address) {
 320   if (_base != NULL) {
 321     // We tried before, but we didn't like the address delivered.
 322     release();
 323   }
 324 
  325   // If the OS doesn't support demand paging for large page memory, we need
  326   // to use reserve_memory_special() to reserve and pin the entire region.
  327   // With a backing file directory, whether large pages are used is up to the filesystem the directory resides in.
  328   // So we ignore the UseLargePages flag in this case.
 329   bool special = (_backing_fd == -1) && (large && !os::can_commit_large_page_memory());
 330   char* base = NULL;
 331 
 332   log_trace(gc, heap, coops)("Trying to allocate at address " PTR_FORMAT
 333                              " heap of size " SIZE_FORMAT_HEX,
 334                              p2i(requested_address),
 335                              size);
 336 
 337   if (special) {
 338     base = os::reserve_memory_special(size, alignment, requested_address, false);
 339 
 340     if (base != NULL) {
 341       // Check alignment constraints.
 342       assert((uintptr_t) base % alignment == 0,
 343              "Large pages returned a non-aligned address, base: "
 344              PTR_FORMAT " alignment: " SIZE_FORMAT_HEX,
 345              p2i(base), alignment);
 346       _special = true;
 347     }
 348   }
 349 
 350   if (base == NULL) {
 351     // Failed; try to reserve regular memory below
 352     if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 353                           !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 354       log_debug(gc, heap, coops)("Reserve regular memory without large pages");
 355     }
 356 
  357     // Optimistically assume that the OS returns an aligned base pointer.
 358     // When reserving a large address range, most OSes seem to align to at
 359     // least 64K.
 360 
 361     // If the memory was requested at a particular address, use
  362     // os::attempt_reserve_memory_at() to avoid mapping over something
 363     // important.  If available space is not detected, return NULL.
 364 
 365     if (requested_address != 0) {
 366       base = os::attempt_reserve_memory_at(size, requested_address, _backing_fd);
 367     } else {
 368       base = os::reserve_memory(size, NULL, alignment, _backing_fd);
 369     }
 370   }
 371   if (base == NULL) { return; }
 372 
 373   // Done
 374   _base = base;
 375   _size = size;
 376   _alignment = alignment;
  377   // If the heap is reserved with a backing file, the entire space has been committed, so set the _special flag to true.
 378   if (_backing_fd != -1) {
 379     _special = true;
 380   }
 381 
 382   // Check alignment constraints
 383   if ((((size_t)base) & (alignment - 1)) != 0) {
 384     // Base not aligned, retry.
 385     release();
 386   }
 387 }
 388 
 389 void ReservedHeapSpace::try_reserve_range(char *highest_start,
 390                                           char *lowest_start,
 391                                           size_t attach_point_alignment,
 392                                           char *aligned_heap_base_min_address,
 393                                           char *upper_bound,
 394                                           size_t size,
 395                                           size_t alignment,
 396                                           bool large) {
 397   const size_t attach_range = highest_start - lowest_start;
  398   // Cap num_attempts at the number of possible attach points.
  399   // At least one attempt is possible even for a zero-sized attach range.
 400   const uint64_t num_attempts_possible = (attach_range / attach_point_alignment) + 1;


 556     char** addresses = get_attach_addresses_for_disjoint_mode();
 557     int i = 0;
 558     while (addresses[i] &&                                 // End of array not yet reached.
 559            ((_base == NULL) ||                             // No previous try succeeded.
 560             (_base + size >  (char *)OopEncodingHeapMax && // Not zerobased or unscaled address.
 561              !Universe::is_disjoint_heap_base_address((address)_base)))) {  // Not disjoint address.
 562       char* const attach_point = addresses[i];
 563       assert(attach_point >= aligned_heap_base_min_address, "Flag support broken");
 564       try_reserve_heap(size + noaccess_prefix, alignment, large, attach_point);
 565       i++;
 566     }
 567 
 568     // Last, desperate try without any placement.
 569     if (_base == NULL) {
 570       log_trace(gc, heap, coops)("Trying to allocate at address NULL heap of size " SIZE_FORMAT_HEX, size + noaccess_prefix);
 571       initialize(size + noaccess_prefix, alignment, large, NULL, false);
 572     }
 573   }
 574 }
 575 
 576 ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, bool large, const char* backingFSforHeap) : ReservedSpace() {
 577 
 578   if (size == 0) {
 579     return;
 580   }
 581 
 582   if (backingFSforHeap != NULL) {
 583     _backing_fd = os::create_file_for_heap(backingFSforHeap, size);
 584   }
 585 
 586   // Heap size should be aligned to alignment, too.
 587   guarantee(is_size_aligned(size, alignment), "set by caller");
 588 
 589   if (UseCompressedOops) {
 590     initialize_compressed_heap(size, alignment, large);
 591     if (_size > size) {
 592       // We allocated heap with noaccess prefix.
 593       // It can happen we get a zerobased/unscaled heap with noaccess prefix,
 594       // if we had to try at arbitrary address.
 595       establish_noaccess_prefix();
 596     }
 597   } else {
 598     initialize(size, alignment, large, NULL, false);
 599   }
 600 
 601   assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
 602          "area must be distinguishable from marks for mark-sweep");
 603   assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
 604          "area must be distinguishable from marks for mark-sweep");
 605 
 606   if (base() > 0) {
 607     MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
 608   }
 609 
 610   if (backingFSforHeap != NULL) {
 611     os::close(_backing_fd);
 612   }
 613 }
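
Closing _backing_fd immediately after construction (os::close above) is safe because a POSIX mapping holds its own reference to the file: the mapping stays valid until munmap. Continuing the hypothetical reserve_file_backed sketch from earlier:

    // The descriptor can be closed once mmap has succeeded; the mapping remains
    // usable and file-backed until it is unmapped.
    int fd;
    char* heap = reserve_file_backed("/tmp/pmem", 64 * 1024 * 1024, &fd);  // hypothetical dir and size
    if (heap != NULL) {
      close(fd);       // mapping stays valid
      heap[0] = 1;     // still writable, written through to the backing file
    }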
 614 
  615 // Reserve space for the code segment.  Same as the Java heap, except we
  616 // mark this as executable.
 617 ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
 618                                      size_t rs_align,
 619                                      bool large) :
 620   ReservedSpace(r_size, rs_align, large, /*executable*/ CodeCacheExtensions::support_dynamic_code()) {
 621   MemTracker::record_virtual_memory_type((address)base(), mtCode);
 622 }
 623 
 624 // VirtualSpace
 625 
 626 VirtualSpace::VirtualSpace() {
 627   _low_boundary           = NULL;
 628   _high_boundary          = NULL;
 629   _low                    = NULL;
 630   _high                   = NULL;
 631   _lower_high             = NULL;