src/hotspot/share/memory/virtualspace.cpp
rev 52439 : [mq]: webrev.2_reserved_page_size

  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "logging/log.hpp"
  27 #include "memory/resourceArea.hpp"
  28 #include "memory/virtualspace.hpp"
  29 #include "oops/markOop.hpp"
  30 #include "oops/oop.inline.hpp"
  31 #include "runtime/os.inline.hpp"
  32 #include "services/memTracker.hpp"
  33 #include "utilities/align.hpp"
  34 
  35 // ReservedSpace
  36 
  37 // Dummy constructor
  38 ReservedSpace::ReservedSpace() : _base(NULL), _size(0), _noaccess_prefix(0),
  39     _alignment(0), _special(false), _fd_for_heap(-1), _executable(false) {
  40 }
  41 
  42 ReservedSpace::ReservedSpace(size_t size, size_t preferred_page_size) : _fd_for_heap(-1) {
  43   bool has_preferred_page_size = preferred_page_size != 0;
  44   // Want to use large pages where possible and pad with small pages.
  45   size_t page_size = has_preferred_page_size ? preferred_page_size : os::page_size_for_region_unaligned(size, 1);
  46   bool large_pages = page_size != (size_t)os::vm_page_size();
  47   size_t alignment;
  48   if (large_pages && has_preferred_page_size) {
  49     alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
  50     // ReservedSpace initialization requires size to be aligned to the given
  51     // alignment. Align the size up.
  52     size = align_up(size, alignment);
  53   } else {
  54     // Don't force the alignment to be large page aligned,
  55     // since that will waste memory.
  56     alignment = os::vm_allocation_granularity();
  57   }
  58   initialize(size, alignment, large_pages, NULL, false);
  59 }
  60 
  61 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
  62                              bool large,
  63                              char* requested_address) : _fd_for_heap(-1) {
  64   initialize(size, alignment, large, requested_address, false);
  65 }
  66 
  67 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
  68                              bool large,
  69                              bool executable) : _fd_for_heap(-1) {
  70   initialize(size, alignment, large, NULL, executable);
  71 }
  72 
  73 ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
  74                              bool special, bool executable) : _fd_for_heap(-1) {
  75   assert((size % os::vm_allocation_granularity()) == 0,
  76          "size not allocation aligned");
  77   _base = base;
  78   _size = size;
  79   _alignment = alignment;
  80   _noaccess_prefix = 0;
  81   _special = special;
  82   _executable = executable;
  83 }
  84 
  85 // Helper method
  86 static void unmap_or_release_memory(char* base, size_t size, bool is_file_mapped) {
  87   if (is_file_mapped) {
  88     if (!os::unmap_memory(base, size)) {
  89       fatal("os::unmap_memory failed");
  90     }
  91   } else if (!os::release_memory(base, size)) {
  92     fatal("os::release_memory failed");
  93   }
  94 }


 100   if (base == requested_address || requested_address == NULL)
 101     return false; // did not fail
 102 
 103   if (base != NULL) {
 104     // A different reserve address may be acceptable in other cases
 105     // but for compressed oops the heap should be at the requested address.
 106     assert(UseCompressedOops, "currently requested address used only for compressed oops");
 107     log_debug(gc, heap, coops)("Reserved memory not at requested address: " PTR_FORMAT " vs " PTR_FORMAT, p2i(base), p2i(requested_address));
 108     // OS ignored requested address. Try different address.
 109     if (special) {
 110       if (!os::release_memory_special(base, size)) {
 111         fatal("os::release_memory_special failed");
 112       }
 113     } else {
 114       unmap_or_release_memory(base, size, is_file_mapped);
 115     }
 116   }
 117   return true;
 118 }
 119 
 120 void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
 121                                char* requested_address,
 122                                bool executable) {
 123   const size_t granularity = os::vm_allocation_granularity();
 124   assert((size & (granularity - 1)) == 0,
 125          "size not aligned to os::vm_allocation_granularity()");
 126   assert((alignment & (granularity - 1)) == 0,
 127          "alignment not aligned to os::vm_allocation_granularity()");
 128   assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
 129          "not a power of 2");
 130 
 131   alignment = MAX2(alignment, (size_t)os::vm_page_size());
 132 
 133   _base = NULL;
 134   _size = 0;
 135   _special = false;
 136   _executable = executable;
 137   _alignment = 0;
 138   _noaccess_prefix = 0;
 139   if (size == 0) {


 211       base = os::reserve_memory_aligned(size, alignment, _fd_for_heap);
 212 
 213       if (requested_address != 0 &&
 214           failed_to_reserve_as_requested(base, requested_address, size, false, _fd_for_heap != -1)) {
 215         // As a result of the alignment constraints, the allocated base differs
 216         // from the requested address. Return to the caller, who can
 217         // take remedial action (like try again without a requested address).
 218         assert(_base == NULL, "should be");
 219         return;
 220       }
 221     }
 222   }
 223   // Done
 224   _base = base;
 225   _size = size;
 226   _alignment = alignment;
 227   // If the heap is reserved with a backing file, the entire space has been committed, so set the _special flag to true.
 228   if (_fd_for_heap != -1) {
 229     _special = true;
 230   }


 231 }
 232 
 233 ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment,
 234                                         bool split, bool realloc) {
 235   assert(partition_size <= size(), "partition failed");
 236   if (split) {
 237     os::split_reserved_memory(base(), size(), partition_size, realloc);
 238   }
 239   ReservedSpace result(base(), partition_size, alignment, special(),
 240                        executable());
 241   return result;
 242 }
 243 
 244 
 245 ReservedSpace
 246 ReservedSpace::last_part(size_t partition_size, size_t alignment) {
 247   assert(partition_size <= size(), "partition failed");
 248   ReservedSpace result(base() + partition_size, size() - partition_size,
 249                        alignment, special(), executable());
 250   return result;


 273 
 274 void ReservedSpace::release() {
 275   if (is_reserved()) {
 276     char *real_base = _base - _noaccess_prefix;
 277     const size_t real_size = _size + _noaccess_prefix;
 278     if (special()) {
 279       if (_fd_for_heap != -1) {
 280         os::unmap_memory(real_base, real_size);
 281       } else {
 282         os::release_memory_special(real_base, real_size);
 283       }
 284     } else {
 285       os::release_memory(real_base, real_size);
 286     }
 287     _base = NULL;
 288     _size = 0;
 289     _noaccess_prefix = 0;
 290     _alignment = 0;
 291     _special = false;
 292     _executable = false;

 293   }
 294 }
 295 
 296 static size_t noaccess_prefix_size(size_t alignment) {
 297   return lcm(os::vm_page_size(), alignment);
 298 }
 299 
 300 void ReservedHeapSpace::establish_noaccess_prefix() {
 301   assert(_alignment >= (size_t)os::vm_page_size(), "must be at least page size big");
 302   _noaccess_prefix = noaccess_prefix_size(_alignment);
 303 
 304   if (base() && base() + _size > (char *)OopEncodingHeapMax) {
 305     if (true
 306         WIN64_ONLY(&& !UseLargePages)
 307         AIX_ONLY(&& os::vm_page_size() != 64*K)) {
 308       // Protect memory at the base of the allocated region.
 309       // If special, the page was committed (only matters on Windows).
 310       if (!os::protect_memory(_base, _noaccess_prefix, os::MEM_PROT_NONE, _special)) {
 311         fatal("cannot protect protection page");
 312       }


 382     // Optimistically assume that the OS returns an aligned base pointer.
 383     // When reserving a large address range, most OSes seem to align to at
 384     // least 64K.
 385 
 386     // If the memory was requested at a particular address, use
 387     // os::attempt_reserve_memory_at() to avoid overmapping something
 388     // important.  If available space is not detected, return NULL.
 389 
 390     if (requested_address != 0) {
 391       base = os::attempt_reserve_memory_at(size, requested_address, _fd_for_heap);
 392     } else {
 393       base = os::reserve_memory(size, NULL, alignment, _fd_for_heap);
 394     }
 395   }
 396   if (base == NULL) { return; }
 397 
 398   // Done
 399   _base = base;
 400   _size = size;
 401   _alignment = alignment;
 402 
 403   // If the heap is reserved with a backing file, the entire space has been committed, so set the _special flag to true.
 404   if (_fd_for_heap != -1) {
 405     _special = true;
 406   }

 407 
 408   // Check alignment constraints
 409   if ((((size_t)base) & (alignment - 1)) != 0) {
 410     // Base not aligned, retry.
 411     release();
 412   }
 413 }
 414 
 415 void ReservedHeapSpace::try_reserve_range(char *highest_start,
 416                                           char *lowest_start,
 417                                           size_t attach_point_alignment,
 418                                           char *aligned_heap_base_min_address,
 419                                           char *upper_bound,
 420                                           size_t size,
 421                                           size_t alignment,
 422                                           bool large) {
 423   const size_t attach_range = highest_start - lowest_start;
 424   // Cap num_attempts at the number of possible attach points.
 425   // At least one attempt is possible even for a zero-sized attach range.
 426   const uint64_t num_attempts_possible = (attach_range / attach_point_alignment) + 1;




  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "logging/log.hpp"
  27 #include "memory/resourceArea.hpp"
  28 #include "memory/virtualspace.hpp"
  29 #include "oops/markOop.hpp"
  30 #include "oops/oop.inline.hpp"
  31 #include "runtime/os.inline.hpp"
  32 #include "services/memTracker.hpp"
  33 #include "utilities/align.hpp"
  34 
  35 // ReservedSpace
  36 
  37 // Dummy constructor
  38 ReservedSpace::ReservedSpace() : _base(NULL), _size(0), _noaccess_prefix(0),
  39     _alignment(0), _special(false), _fd_for_heap(-1), _actual_page_size(0), _executable(false) {
  40 }
  41 
  42 ReservedSpace::ReservedSpace(size_t size, size_t preferred_page_size) : _fd_for_heap(-1), _actual_page_size(0) {
  43   bool has_preferred_page_size = preferred_page_size != 0;
  44   // Want to use large pages where possible and pad with small pages.
  45   size_t page_size = has_preferred_page_size ? preferred_page_size : os::page_size_for_region_unaligned(size, 1);
  46   bool large_pages = page_size != (size_t)os::vm_page_size();
  47   size_t alignment;
  48   if (large_pages && has_preferred_page_size) {
  49     alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
  50     // ReservedSpace initialization requires size to be aligned to the given
  51     // alignment. Align the size up.
  52     size = align_up(size, alignment);
  53   } else {
  54     // Don't force the alignment to be large page aligned,
  55     // since that will waste memory.
  56     alignment = os::vm_allocation_granularity();
  57   }
  58   initialize(size, alignment, large_pages, NULL, false);
  59 }
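
A worked example of the padding math above, with illustrative values (a 2M
large page size and a 64K allocation granularity; the real values come from
the os:: layer and are platform-dependent):

    #include <cstddef>
    #include <cstdio>

    static const size_t K = 1024, M = K * K;

    // Stand-in for HotSpot's align_up(); assumes alignment is a power of two.
    static size_t align_up(size_t size, size_t alignment) {
      return (size + alignment - 1) & ~(alignment - 1);
    }

    int main() {
      size_t preferred_page_size = 2 * M;  // hypothetical large page size
      size_t granularity = 64 * K;         // hypothetical allocation granularity
      size_t size = 500 * M + 1;           // caller-requested size

      // large_pages is true here (2M != vm page size), so take the first branch:
      size_t alignment = preferred_page_size > granularity ? preferred_page_size
                                                           : granularity;  // 2M
      size = align_up(size, alignment);
      printf("%zuM\n", size / M);          // prints 502
      return 0;
    }
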
  60 
  61 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
  62                              bool large,
  63                              char* requested_address) : _fd_for_heap(-1), _actual_page_size(0) {
  64   initialize(size, alignment, large, requested_address, false);
  65 }
  66 
  67 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
  68                              bool large,
  69                              bool executable) : _fd_for_heap(-1), _actual_page_size(0) {
  70   initialize(size, alignment, large, NULL, executable);
  71 }
  72 
  73 ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
  74                              bool special, bool executable) : _fd_for_heap(-1), _actual_page_size(0) {
  75   assert((size % os::vm_allocation_granularity()) == 0,
  76          "size not allocation aligned");
  77   _base = base;
  78   _size = size;
  79   _alignment = alignment;
  80   _noaccess_prefix = 0;
  81   _special = special;
  82   _executable = executable;
  83 }
  84 
  85 // Helper method
  86 static void unmap_or_release_memory(char* base, size_t size, bool is_file_mapped) {
  87   if (is_file_mapped) {
  88     if (!os::unmap_memory(base, size)) {
  89       fatal("os::unmap_memory failed");
  90     }
  91   } else if (!os::release_memory(base, size)) {
  92     fatal("os::release_memory failed");
  93   }
  94 }


 100   if (base == requested_address || requested_address == NULL)
 101     return false; // did not fail
 102 
 103   if (base != NULL) {
 104     // A different reserve address may be acceptable in other cases
 105     // but for compressed oops the heap should be at the requested address.
 106     assert(UseCompressedOops, "currently requested address used only for compressed oops");
 107     log_debug(gc, heap, coops)("Reserved memory not at requested address: " PTR_FORMAT " vs " PTR_FORMAT, p2i(base), p2i(requested_address));
 108     // OS ignored requested address. Try different address.
 109     if (special) {
 110       if (!os::release_memory_special(base, size)) {
 111         fatal("os::release_memory_special failed");
 112       }
 113     } else {
 114       unmap_or_release_memory(base, size, is_file_mapped);
 115     }
 116   }
 117   return true;
 118 }
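
A minimal sketch of the caller-side retry this return value enables
(reserve_at is a hypothetical stand-in for the os:: reservation calls used in
this file):

    char* base = reserve_at(requested_address, size);
    if (failed_to_reserve_as_requested(base, requested_address, size,
                                       /*special=*/false, /*is_file_mapped=*/false)) {
      // The mapping landed at the wrong address and has already been released;
      // the remedial action is to retry without an address hint.
      base = reserve_at(NULL, size);
    }
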
 119 
 120 // Should be called after _special is decided.
 121 void ReservedSpace::update_actual_page_size(bool large_page) {
 122   // There are two ways to manage large page memory in ReservedSpace.
 123   // 1. OS supports committing large page memory.
 124   // 2. OS doesn't support committing large page memory, so ReservedSpace manages it specially.
 125   //    If the reservation succeeded, '_special' will be set.
 126   if (large_page && (os::can_commit_large_page_memory() || _special)) {
 127     _actual_page_size = os::large_page_size();
 128   } else {
 129     _actual_page_size = os::vm_page_size();
 130   }
 131 }
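
The resulting page size reduces to the following truth table (the 4K and 2M
values are illustrative; the real values come from os::vm_page_size() and
os::large_page_size()):

    // large_page | can_commit_large_page_memory() | _special | _actual_page_size
    // true       | true                           | any      | large_page_size(), e.g. 2M
    // true       | false                          | true     | large_page_size(), e.g. 2M
    // true       | false                          | false    | vm_page_size(),    e.g. 4K
    // false      | any                            | any      | vm_page_size(),    e.g. 4K
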
 132 
 133 void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
 134                                char* requested_address,
 135                                bool executable) {
 136   const size_t granularity = os::vm_allocation_granularity();
 137   assert((size & (granularity - 1)) == 0,
 138          "size not aligned to os::vm_allocation_granularity()");
 139   assert((alignment & (granularity - 1)) == 0,
 140          "alignment not aligned to os::vm_allocation_granularity()");
 141   assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
 142          "not a power of 2");
 143 
 144   alignment = MAX2(alignment, (size_t)os::vm_page_size());
 145 
 146   _base = NULL;
 147   _size = 0;
 148   _special = false;
 149   _executable = executable;
 150   _alignment = 0;
 151   _noaccess_prefix = 0;
 152   if (size == 0) {


 224       base = os::reserve_memory_aligned(size, alignment, _fd_for_heap);
 225 
 226       if (requested_address != 0 &&
 227           failed_to_reserve_as_requested(base, requested_address, size, false, _fd_for_heap != -1)) {
 228         // As a result of the alignment constraints, the allocated base differs
 229         // from the requested address. Return to the caller, who can
 230         // take remedial action (like try again without a requested address).
 231         assert(_base == NULL, "should be");
 232         return;
 233       }
 234     }
 235   }
 236   // Done
 237   _base = base;
 238   _size = size;
 239   _alignment = alignment;
 240   // If the heap is reserved with a backing file, the entire space has been committed, so set the _special flag to true.
 241   if (_fd_for_heap != -1) {
 242     _special = true;
 243   }
 244 
 245   update_actual_page_size(large);
 246 }
 247 
 248 ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment,
 249                                         bool split, bool realloc) {
 250   assert(partition_size <= size(), "partition failed");
 251   if (split) {
 252     os::split_reserved_memory(base(), size(), partition_size, realloc);
 253   }
 254   ReservedSpace result(base(), partition_size, alignment, special(),
 255                        executable());
 256   return result;
 257 }
 258 
 259 
 260 ReservedSpace
 261 ReservedSpace::last_part(size_t partition_size, size_t alignment) {
 262   assert(partition_size <= size(), "partition failed");
 263   ReservedSpace result(base() + partition_size, size() - partition_size,
 264                        alignment, special(), executable());
 265   return result;
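
As an illustration of the two partitioning calls above, a 64M reservation
could be split into a 16M head and a 48M tail (a sketch; the sizes, alignment
value, and explicit split/realloc arguments are illustrative):

    ReservedSpace rs(64 * M, alignment, /*large=*/false, /*executable=*/false);
    ReservedSpace head = rs.first_part(16 * M, alignment, /*split=*/false, /*realloc=*/true);
    ReservedSpace tail = rs.last_part(16 * M, alignment);
    // head covers [base(), base() + 16M); tail covers [base() + 16M, base() + 64M).
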


 288 
 289 void ReservedSpace::release() {
 290   if (is_reserved()) {
 291     char *real_base = _base - _noaccess_prefix;
 292     const size_t real_size = _size + _noaccess_prefix;
 293     if (special()) {
 294       if (_fd_for_heap != -1) {
 295         os::unmap_memory(real_base, real_size);
 296       } else {
 297         os::release_memory_special(real_base, real_size);
 298       }
 299     } else {
 300       os::release_memory(real_base, real_size);
 301     }
 302     _base = NULL;
 303     _size = 0;
 304     _noaccess_prefix = 0;
 305     _alignment = 0;
 306     _special = false;
 307     _executable = false;
 308     _actual_page_size = 0;
 309   }
 310 }
 311 
 312 static size_t noaccess_prefix_size(size_t alignment) {
 313   return lcm(os::vm_page_size(), alignment);
 314 }
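
Since the heap alignment is in practice a multiple of the page size, the lcm
normally collapses to the alignment itself; a few illustrative values:

    // lcm(vm_page_size, alignment) examples:
    //   lcm(4K,  4K)  =  4K
    //   lcm(4K,  32M) = 32M   (alignment already a multiple of the page size)
    //   lcm(64K, 32M) = 32M   (e.g. with 64K pages on AIX)
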
 315 
 316 void ReservedHeapSpace::establish_noaccess_prefix() {
 317   assert(_alignment >= (size_t)os::vm_page_size(), "must be at least page size big");
 318   _noaccess_prefix = noaccess_prefix_size(_alignment);
 319 
 320   if (base() && base() + _size > (char *)OopEncodingHeapMax) {
 321     if (true
 322         WIN64_ONLY(&& !UseLargePages)
 323         AIX_ONLY(&& os::vm_page_size() != 64*K)) {
 324       // Protect memory at the base of the allocated region.
 325       // If special, the page was committed (only matters on Windows).
 326       if (!os::protect_memory(_base, _noaccess_prefix, os::MEM_PROT_NONE, _special)) {
 327         fatal("cannot protect protection page");
 328       }


 398     // Optimistically assume that the OS returns an aligned base pointer.
 399     // When reserving a large address range, most OSes seem to align to at
 400     // least 64K.
 401 
 402     // If the memory was requested at a particular address, use
 403     // os::attempt_reserve_memory_at() to avoid overmapping something
 404     // important.  If available space is not detected, return NULL.
 405 
 406     if (requested_address != 0) {
 407       base = os::attempt_reserve_memory_at(size, requested_address, _fd_for_heap);
 408     } else {
 409       base = os::reserve_memory(size, NULL, alignment, _fd_for_heap);
 410     }
 411   }
 412   if (base == NULL) { return; }
 413 
 414   // Done
 415   _base = base;
 416   _size = size;
 417   _alignment = alignment;

 418   // If the heap is reserved with a backing file, the entire space has been committed, so set the _special flag to true.
 419   if (_fd_for_heap != -1) {
 420     _special = true;
 421   }
 422   update_actual_page_size(large);
 423 
 424   // Check alignment constraints
 425   if ((((size_t)base) & (alignment - 1)) != 0) {
 426     // Base not aligned, retry.
 427     release();
 428   }
 429 }
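
The final check relies on the power-of-two invariant asserted earlier in this
file: for a power-of-two alignment, base & (alignment - 1) isolates the
misaligned low-order bits. A worked example:

    // alignment = 64K = 0x10000, so alignment - 1 = 0xffff:
    //   base = 0x7f0000000000  ->  base & 0xffff == 0x0000  (aligned, keep)
    //   base = 0x7f0000001000  ->  base & 0xffff == 0x1000  (misaligned, release() and retry)
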
 430 
 431 void ReservedHeapSpace::try_reserve_range(char *highest_start,
 432                                           char *lowest_start,
 433                                           size_t attach_point_alignment,
 434                                           char *aligned_heap_base_min_address,
 435                                           char *upper_bound,
 436                                           size_t size,
 437                                           size_t alignment,
 438                                           bool large) {
 439   const size_t attach_range = highest_start - lowest_start;
 440   // Cap num_attempts at the number of possible attach points.
 441   // At least one attempt is possible even for a zero-sized attach range.
 442   const uint64_t num_attempts_possible = (attach_range / attach_point_alignment) + 1;

