
src/share/vm/runtime/virtualspace.cpp

rev 7602 : 8064457: Introduce compressed oops mode disjoint base and improve compressed heap handling.
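
The old version of the file is listed first; the patched version (rev 7602) follows. The patch tries to place the Java heap so that compressed oops decode as cheaply as possible, in order of decreasing optimization potential: unscaled (the heap fits below UnscaledOopHeapMax), zerobased (below OopEncodingHeapMax), the new disjoint base mode (heap base aligned to OopEncodingHeapMax, so the base bits and the shifted-oop bits never overlap), and finally an arbitrary base with a noaccess prefix. As a minimal sketch of why a disjoint base helps, assuming a 3-bit shift and a 32G encoding range (the constants and function below are ours, not HotSpot's):

    #include <cassert>
    #include <cstdint>

    const unsigned kLogMinObjAlignment = 3;                    // 8-byte object alignment
    const uint64_t kOopEncodingHeapMax = UINT64_C(32) << 30;   // 32G, i.e. 2^35

    uint64_t decode_disjoint(uint32_t narrow_oop, uint64_t heap_base) {
      // Disjoint base mode precondition: the base is aligned to the encoding
      // range, so its low 35 bits are zero while (narrow_oop << 3) stays below
      // 2^35. The bit ranges are disjoint, hence OR is equivalent to ADD here.
      assert(heap_base % kOopEncodingHeapMax == 0);
      return heap_base | ((uint64_t)narrow_oop << kLogMinObjAlignment);
    }

This is also why the wish addresses in get_attach_addresses_for_disjoint_mode() in the new version are all multiples of 32G.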


  26 #include "oops/markOop.hpp"
  27 #include "oops/oop.inline.hpp"
  28 #include "runtime/virtualspace.hpp"
  29 #include "services/memTracker.hpp"
  30 
  31 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
  32 
  33 // ReservedSpace
  34 
  35 // Dummy constructor
  36 ReservedSpace::ReservedSpace() : _base(NULL), _size(0), _noaccess_prefix(0),
  37     _alignment(0), _special(false), _executable(false) {
  38 }
  39 
  40 ReservedSpace::ReservedSpace(size_t size) {
  41   size_t page_size = os::page_size_for_region(size, 1);
  42   bool large_pages = page_size != (size_t)os::vm_page_size();
  43   // Don't force the alignment to be large page aligned,
  44   // since that will waste memory.
  45   size_t alignment = os::vm_allocation_granularity();
  46   initialize(size, alignment, large_pages, NULL, 0, false);
  47 }
  48 
  49 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
  50                              bool large,
  51                              char* requested_address,
  52                              const size_t noaccess_prefix) {
  53   initialize(size+noaccess_prefix, alignment, large, requested_address,
  54              noaccess_prefix, false);
  55 }
  56 
  57 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
  58                              bool large,
  59                              bool executable) {
  60   initialize(size, alignment, large, NULL, 0, executable);
  61 }
  62 
  63 // Helper method.
  64 static bool failed_to_reserve_as_requested(char* base, char* requested_address,
  65                                            const size_t size, bool special)
  66 {
  67   if (base == requested_address || requested_address == NULL)
  68     return false; // did not fail
  69 
  70   if (base != NULL) {
  71     // A different reserve address may be acceptable in other cases
  72     // but for compressed oops the heap should be at the requested address.
  73     assert(UseCompressedOops, "currently requested address used only for compressed oops");
  74     if (PrintCompressedOopsMode) {
  75       tty->cr();
  76       tty->print_cr("Reserved memory not at requested address: " PTR_FORMAT " vs " PTR_FORMAT, base, requested_address);
  77     }
  78     // OS ignored requested address. Try different address.
  79     if (special) {
  80       if (!os::release_memory_special(base, size)) {
  81         fatal("os::release_memory_special failed");
  82       }
  83     } else {
  84       if (!os::release_memory(base, size)) {
  85         fatal("os::release_memory failed");
  86       }
  87     }
  88   }
  89   return true;
  90 }
  91 
  92 void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
  93                                char* requested_address,
  94                                const size_t noaccess_prefix,
  95                                bool executable) {
  96   const size_t granularity = os::vm_allocation_granularity();
  97   assert((size & (granularity - 1)) == 0,
  98          "size not aligned to os::vm_allocation_granularity()");
  99   assert((alignment & (granularity - 1)) == 0,
 100          "alignment not aligned to os::vm_allocation_granularity()");
 101   assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
 102          "not a power of 2");
 103 
 104   alignment = MAX2(alignment, (size_t)os::vm_page_size());
 105 
 106   // Assert that if noaccess_prefix is used, it is the same as alignment.
 107   assert(noaccess_prefix == 0 ||
 108          noaccess_prefix == alignment, "noaccess prefix wrong");
 109 
 110   _base = NULL;
 111   _size = 0;
 112   _special = false;
 113   _executable = executable;
 114   _alignment = 0;
 115   _noaccess_prefix = 0;
 116   if (size == 0) {
 117     return;
 118   }
 119 
 120   // If OS doesn't support demand paging for large page memory, we need
 121   // to use reserve_memory_special() to reserve and pin the entire region.
 122   bool special = large && !os::can_commit_large_page_memory();
 123   char* base = NULL;
 124 
 125   if (requested_address != 0) {
 126     requested_address -= noaccess_prefix; // adjust requested address
 127     assert(requested_address != NULL, "huge noaccess prefix?");
 128   }
 129 
 130   if (special) {
 131 
 132     base = os::reserve_memory_special(size, alignment, requested_address, executable);
 133 
 134     if (base != NULL) {
 135       if (failed_to_reserve_as_requested(base, requested_address, size, true)) {
 136         // OS ignored requested address. Try different address.
 137         return;
 138       }
 139       // Check alignment constraints.
 140       assert((uintptr_t) base % alignment == 0,
 141              err_msg("Large pages returned a non-aligned address, base: "
 142                  PTR_FORMAT " alignment: " PTR_FORMAT,
 143                  base, (void*)(uintptr_t)alignment));
 144       _special = true;
 145     } else {
 146       // failed; try to reserve regular memory below
 147       if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 148                             !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 149         if (PrintCompressedOopsMode) {


 159     // When reserving a large address range, most OSes seem to align to at
 160     // least 64K.
 161 
 162     // If the memory was requested at a particular address, use
  163     // os::attempt_reserve_memory_at() to avoid mapping over something
 164     // important.  If available space is not detected, return NULL.
 165 
 166     if (requested_address != 0) {
 167       base = os::attempt_reserve_memory_at(size, requested_address);
 168       if (failed_to_reserve_as_requested(base, requested_address, size, false)) {
 169         // OS ignored requested address. Try different address.
 170         base = NULL;
 171       }
 172     } else {
 173       base = os::reserve_memory(size, NULL, alignment);
 174     }
 175 
 176     if (base == NULL) return;
 177 
 178     // Check alignment constraints
 179     if ((((size_t)base + noaccess_prefix) & (alignment - 1)) != 0) {
 180       // Base not aligned, retry
 181       if (!os::release_memory(base, size)) fatal("os::release_memory failed");
 182       // Make sure that size is aligned
 183       size = align_size_up(size, alignment);
 184       base = os::reserve_memory_aligned(size, alignment);
 185 
 186       if (requested_address != 0 &&
 187           failed_to_reserve_as_requested(base, requested_address, size, false)) {
 188         // As a result of the alignment constraints, the allocated base differs
  189         // from the requested address. Return to the caller, who can
 190         // take remedial action (like try again without a requested address).
 191         assert(_base == NULL, "should be");
 192         return;
 193       }
 194     }
 195   }
 196   // Done
 197   _base = base;
 198   _size = size;
 199   _alignment = alignment;
 200   _noaccess_prefix = noaccess_prefix;
 201 
 202   // Assert that if noaccess_prefix is used, it is the same as alignment.
 203   assert(noaccess_prefix == 0 ||
 204          noaccess_prefix == _alignment, "noaccess prefix wrong");
 205 
 206   assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
 207          "area must be distinguishable from marks for mark-sweep");
 208   assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
 209          "area must be distinguishable from marks for mark-sweep");
 210 }
 211 
 212 
 213 ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
 214                              bool special, bool executable) {
 215   assert((size % os::vm_allocation_granularity()) == 0,
 216          "size not allocation aligned");
 217   _base = base;
 218   _size = size;
 219   _alignment = alignment;
 220   _noaccess_prefix = 0;
 221   _special = special;
 222   _executable = executable;
 223 }
 224 
 225 
 226 ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment,
 227                                         bool split, bool realloc) {
 228   assert(partition_size <= size(), "partition failed");
 229   if (split) {


 259 }
 260 
 261 
 262 size_t ReservedSpace::allocation_align_size_down(size_t size) {
 263   return align_size_down(size, os::vm_allocation_granularity());
 264 }
 265 
 266 
 267 void ReservedSpace::release() {
 268   if (is_reserved()) {
 269     char *real_base = _base - _noaccess_prefix;
 270     const size_t real_size = _size + _noaccess_prefix;
 271     if (special()) {
 272       os::release_memory_special(real_base, real_size);
 273     } else{
 274       os::release_memory(real_base, real_size);
 275     }
 276     _base = NULL;
 277     _size = 0;
 278     _noaccess_prefix = 0;
 279     _special = false;
 280     _executable = false;
 281   }
 282 }
 283 
 284 void ReservedSpace::protect_noaccess_prefix(const size_t size) {
 285   assert( (_noaccess_prefix != 0) == (UseCompressedOops && _base != NULL &&
 286                                       (Universe::narrow_oop_base() != NULL) &&
 287                                       Universe::narrow_oop_use_implicit_null_checks()),
  288          "noaccess_prefix should be used only with non-zero-based compressed oops");
 289 
 290   // If there is no noaccess prefix, return.
 291   if (_noaccess_prefix == 0) return;
 292 
 293   assert(_noaccess_prefix >= (size_t)os::vm_page_size(),
 294          "must be at least page size big");
 295 
 296   // Protect memory at the base of the allocated region.
  297   // If special, the page was committed (only matters on Windows)
 298   if (!os::protect_memory(_base, _noaccess_prefix, os::MEM_PROT_NONE,
 299                           _special)) {
 300     fatal("cannot protect protection page");
 301   }
 302   if (PrintCompressedOopsMode) {
 303     tty->cr();
 304     tty->print_cr("Protected page at the reserved heap base: " PTR_FORMAT " / " INTX_FORMAT " bytes", _base, _noaccess_prefix);
 305   }
 306 
 307   _base += _noaccess_prefix;
 308   _size -= _noaccess_prefix;
 309   assert((size == _size) && ((uintptr_t)_base % _alignment == 0),
 310          "must be exactly of required size and alignment");
 311 }
 312 
 313 ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment,
 314                                      bool large, char* requested_address) :
 315   ReservedSpace(size, alignment, large,
 316                 requested_address,
 317                 (UseCompressedOops && (Universe::narrow_oop_base() != NULL) &&
 318                  Universe::narrow_oop_use_implicit_null_checks()) ?
 319                   lcm(os::vm_page_size(), alignment) : 0) {
 320   if (base() > 0) {
 321     MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
 322   }
 323 
  324   // Only reserved space for the Java heap should have a noaccess_prefix
 325   // if using compressed oops.
 326   protect_noaccess_prefix(size);
 327 }
 328 
  329 // Reserve space for the code segment.  Same as the Java heap, except we mark
  330 // this as executable.
 331 ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
 332                                      size_t rs_align,
 333                                      bool large) :
 334   ReservedSpace(r_size, rs_align, large, /*executable*/ true) {
 335   MemTracker::record_virtual_memory_type((address)base(), mtCode);
 336 }
 337 
 338 // VirtualSpace
 339 
 340 VirtualSpace::VirtualSpace() {
 341   _low_boundary           = NULL;
 342   _high_boundary          = NULL;
 343   _low                    = NULL;
 344   _high                   = NULL;
 345   _lower_high             = NULL;
 346   _middle_high            = NULL;


 786       *p = 1;
 787     }
 788   }
 789 
 790   static void release_memory_for_test(ReservedSpace rs) {
 791     if (rs.special()) {
 792       guarantee(os::release_memory_special(rs.base(), rs.size()), "Shouldn't fail");
 793     } else {
 794       guarantee(os::release_memory(rs.base(), rs.size()), "Shouldn't fail");
 795     }
 796   }
 797 
 798   static void test_reserved_space1(size_t size, size_t alignment) {
 799     test_log("test_reserved_space1(%p)", (void*) (uintptr_t) size);
 800 
 801     assert(is_size_aligned(size, alignment), "Incorrect input parameters");
 802 
 803     ReservedSpace rs(size,          // size
 804                      alignment,     // alignment
 805                      UseLargePages, // large
 806                      NULL,          // requested_address
  807                      0);            // noaccess_prefix
 808 
 809     test_log(" rs.special() == %d", rs.special());
 810 
 811     assert(rs.base() != NULL, "Must be");
 812     assert(rs.size() == size, "Must be");
 813 
 814     assert(is_ptr_aligned(rs.base(), alignment), "aligned sizes should always give aligned addresses");
  815     assert(is_size_aligned(rs.size(), alignment), "aligned sizes should always give aligned sizes");
 816 
 817     if (rs.special()) {
 818       small_page_write(rs.base(), size);
 819     }
 820 
 821     release_memory_for_test(rs);
 822   }
 823 
 824   static void test_reserved_space2(size_t size) {
 825     test_log("test_reserved_space2(%p)", (void*)(uintptr_t)size);
 826 
 827     assert(is_size_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");

src/share/vm/runtime/virtualspace.cpp: new version (rev 7602)

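Relative to the old version above, the noaccess prefix no longer flows through ReservedSpace::initialize(); ReservedHeapSpace instead gains establish_noaccess_prefix(), try_reserve_heap(), try_reserve_range(), get_attach_addresses_for_disjoint_mode() and initialize_compressed_heap(), which probe attach points from the top of each usable range downwards. A minimal standalone sketch of that probing loop, with hypothetical names (try_attach stands in for try_reserve_heap() plus the caller's acceptance check; the real logic is try_reserve_range() below):

    #include <stddef.h>
    #include <stdint.h>

    typedef bool (*try_attach_fn)(uintptr_t attach_point);

    static void search_attach_points(uintptr_t highest_start, uintptr_t lowest_start,
                                     size_t attach_point_alignment,  // a power of two
                                     uint64_t max_steps,             // plays the role of HeapSearchSteps
                                     try_attach_fn try_attach) {
      const size_t attach_range = highest_start - lowest_start;
      // At least one attempt is possible even for a zero-sized attach range.
      const uint64_t possible = (uint64_t)(attach_range / attach_point_alignment) + 1;
      const uint64_t attempts = possible < max_steps ? possible : max_steps;
      size_t stepsize;
      if (attach_range == 0) {
        stepsize = (size_t)highest_start;  // larger than the range, so the loop runs exactly once
      } else {
        // Round the step up to the alignment so every probed address stays aligned.
        const size_t raw = attach_range / attempts;
        stepsize = (raw + attach_point_alignment - 1) & ~(attach_point_alignment - 1);
      }
      // Walk top-down; the second condition guards against wrap-around, as in the original.
      for (uintptr_t p = highest_start; p >= lowest_start && p <= highest_start; p -= stepsize) {
        if (try_attach(p)) {
          return;  // a base satisfying the caller was obtained
        }
      }
    }
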
  26 #include "oops/markOop.hpp"
  27 #include "oops/oop.inline.hpp"
  28 #include "runtime/virtualspace.hpp"
  29 #include "services/memTracker.hpp"
  30 
  31 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
  32 
  33 // ReservedSpace
  34 
  35 // Dummy constructor
  36 ReservedSpace::ReservedSpace() : _base(NULL), _size(0), _noaccess_prefix(0),
  37     _alignment(0), _special(false), _executable(false) {
  38 }
  39 
  40 ReservedSpace::ReservedSpace(size_t size) {
  41   size_t page_size = os::page_size_for_region(size, 1);
  42   bool large_pages = page_size != (size_t)os::vm_page_size();
  43   // Don't force the alignment to be large page aligned,
  44   // since that will waste memory.
  45   size_t alignment = os::vm_allocation_granularity();
  46   initialize(size, alignment, large_pages, NULL, false);
  47 }
  48 
  49 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
  50                              bool large,
  51                              char* requested_address) {
  52   initialize(size, alignment, large, requested_address, false);
  53 }
  54 
  55 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
  56                              bool large,
  57                              bool executable) {
  58   initialize(size, alignment, large, NULL, executable);
  59 }
  60 
  61 // Helper method.
  62 static bool failed_to_reserve_as_requested(char* base, char* requested_address,
  63                                            const size_t size, bool special)
  64 {
  65   if (base == requested_address || requested_address == NULL)
  66     return false; // did not fail
  67 
  68   if (base != NULL) {
  69     // A different reserve address may be acceptable in other cases
  70     // but for compressed oops the heap should be at the requested address.
  71     assert(UseCompressedOops, "currently requested address used only for compressed oops");
  72     if (PrintCompressedOopsMode) {
  73       tty->cr();
  74       tty->print_cr("Reserved memory not at requested address: " PTR_FORMAT " vs " PTR_FORMAT, base, requested_address);
  75     }
  76     // OS ignored requested address. Try different address.
  77     if (special) {
  78       if (!os::release_memory_special(base, size)) {
  79         fatal("os::release_memory_special failed");
  80       }
  81     } else {
  82       if (!os::release_memory(base, size)) {
  83         fatal("os::release_memory failed");
  84       }
  85     }
  86   }
  87   return true;
  88 }
  89 
  90 void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
  91                                char* requested_address,
  92                                bool executable) {
  93   const size_t granularity = os::vm_allocation_granularity();
  94   assert((size & (granularity - 1)) == 0,
  95          "size not aligned to os::vm_allocation_granularity()");
  96   assert((alignment & (granularity - 1)) == 0,
  97          "alignment not aligned to os::vm_allocation_granularity()");
  98   assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
  99          "not a power of 2");
 100 
 101   alignment = MAX2(alignment, (size_t)os::vm_page_size());
 102 
 103   _base = NULL;
 104   _size = 0;
 105   _special = false;
 106   _executable = executable;
 107   _alignment = 0;
 108   _noaccess_prefix = 0;
 109   if (size == 0) {
 110     return;
 111   }
 112 
 113   // If OS doesn't support demand paging for large page memory, we need
 114   // to use reserve_memory_special() to reserve and pin the entire region.
 115   bool special = large && !os::can_commit_large_page_memory();
 116   char* base = NULL;
 117 
 118   if (special) {
 119 
 120     base = os::reserve_memory_special(size, alignment, requested_address, executable);
 121 
 122     if (base != NULL) {
 123       if (failed_to_reserve_as_requested(base, requested_address, size, true)) {
 124         // OS ignored requested address. Try different address.
 125         return;
 126       }
 127       // Check alignment constraints.
 128       assert((uintptr_t) base % alignment == 0,
 129              err_msg("Large pages returned a non-aligned address, base: "
 130                  PTR_FORMAT " alignment: " PTR_FORMAT,
 131                  base, (void*)(uintptr_t)alignment));
 132       _special = true;
 133     } else {
 134       // failed; try to reserve regular memory below
 135       if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 136                             !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 137         if (PrintCompressedOopsMode) {


 147     // When reserving a large address range, most OSes seem to align to at
 148     // least 64K.
 149 
 150     // If the memory was requested at a particular address, use
  151     // os::attempt_reserve_memory_at() to avoid mapping over something
 152     // important.  If available space is not detected, return NULL.
 153 
 154     if (requested_address != 0) {
 155       base = os::attempt_reserve_memory_at(size, requested_address);
 156       if (failed_to_reserve_as_requested(base, requested_address, size, false)) {
 157         // OS ignored requested address. Try different address.
 158         base = NULL;
 159       }
 160     } else {
 161       base = os::reserve_memory(size, NULL, alignment);
 162     }
 163 
 164     if (base == NULL) return;
 165 
 166     // Check alignment constraints
 167     if ((((size_t)base) & (alignment - 1)) != 0) {
 168       // Base not aligned, retry
 169       if (!os::release_memory(base, size)) fatal("os::release_memory failed");
 170       // Make sure that size is aligned
 171       size = align_size_up(size, alignment);
 172       base = os::reserve_memory_aligned(size, alignment);
 173 
 174       if (requested_address != 0 &&
 175           failed_to_reserve_as_requested(base, requested_address, size, false)) {
 176         // As a result of the alignment constraints, the allocated base differs
  177         // from the requested address. Return to the caller, who can
 178         // take remedial action (like try again without a requested address).
 179         assert(_base == NULL, "should be");
 180         return;
 181       }
 182     }
 183   }
 184   // Done
 185   _base = base;
 186   _size = size;
 187   _alignment = alignment;
 188 }
 189 
 190 
 191 ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
 192                              bool special, bool executable) {
 193   assert((size % os::vm_allocation_granularity()) == 0,
 194          "size not allocation aligned");
 195   _base = base;
 196   _size = size;
 197   _alignment = alignment;
 198   _noaccess_prefix = 0;
 199   _special = special;
 200   _executable = executable;
 201 }
 202 
 203 
 204 ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment,
 205                                         bool split, bool realloc) {
 206   assert(partition_size <= size(), "partition failed");
 207   if (split) {


 237 }
 238 
 239 
 240 size_t ReservedSpace::allocation_align_size_down(size_t size) {
 241   return align_size_down(size, os::vm_allocation_granularity());
 242 }
 243 
 244 
 245 void ReservedSpace::release() {
 246   if (is_reserved()) {
 247     char *real_base = _base - _noaccess_prefix;
 248     const size_t real_size = _size + _noaccess_prefix;
 249     if (special()) {
 250       os::release_memory_special(real_base, real_size);
 251     } else{
 252       os::release_memory(real_base, real_size);
 253     }
 254     _base = NULL;
 255     _size = 0;
 256     _noaccess_prefix = 0;
 257     _alignment = 0;
 258     _special = false;
 259     _executable = false;
 260   }
 261 }
 262 
 263 static size_t noaccess_prefix_size(size_t alignment) {
 264   return lcm(os::vm_page_size(), alignment);
 265 }
 266 
 267 void ReservedHeapSpace::establish_noaccess_prefix() {
 268   assert(_alignment >= (size_t)os::vm_page_size(), "must be at least page size big");
 269   _noaccess_prefix = noaccess_prefix_size(_alignment);
 270 
 271   if (base() && base() + _size > (char *)OopEncodingHeapMax) {
 272     if (true
 273         WIN64_ONLY(&& !UseLargePages)
 274         AIX_ONLY(&& os::vm_page_size() != SIZE_64K)) {
 275       // Protect memory at the base of the allocated region.
  276       // If special, the page was committed (only matters on Windows)
 277       if (!os::protect_memory(_base, _noaccess_prefix, os::MEM_PROT_NONE, _special)) {
 278         fatal("cannot protect protection page");
 279       }
 280       if (PrintCompressedOopsMode) {
 281         tty->cr();
 282         tty->print_cr("Protected page at the reserved heap base: "
 283                       PTR_FORMAT " / " INTX_FORMAT " bytes", _base, _noaccess_prefix);
 284       }
 285       assert(Universe::narrow_oop_use_implicit_null_checks() == true, "not initialized?");
 286     } else {
 287       Universe::set_narrow_oop_use_implicit_null_checks(false);
 288     }
 289   }
 290 
 291   _base += _noaccess_prefix;
 292   _size -= _noaccess_prefix;
 293   assert(((uintptr_t)_base % _alignment == 0), "must be exactly of required alignment");
 294 }
 295 
  296 // Tries to allocate memory of size 'size' at address requested_address with alignment 'alignment'.
  297 // Does not check whether the reserved memory is actually at requested_address, as the memory returned
  298 // might still fulfill the wishes of the caller.
  299 // Assures the memory is aligned to 'alignment'.
  300 // NOTE: If ReservedHeapSpace already points to some reserved memory, this is freed first.
 301 void ReservedHeapSpace::try_reserve_heap(size_t size,
 302                                          size_t alignment,
 303                                          bool large,
 304                                          char* requested_address) {
 305   if (_base != NULL) {
 306     // We tried before, but we didn't like the address delivered.
 307     release();
 308   }
 309 
 310   // If OS doesn't support demand paging for large page memory, we need
 311   // to use reserve_memory_special() to reserve and pin the entire region.
 312   bool special = large && !os::can_commit_large_page_memory();
 313   char* base = NULL;
 314 
 315   if (PrintCompressedOopsMode && Verbose) {
 316     tty->print("Trying to allocate at address " PTR_FORMAT " heap of size " PTR_FORMAT ".\n",
 317                requested_address, (address)size);
 318   }
 319 
 320   if (special) {
 321     base = os::reserve_memory_special(size, alignment, requested_address, false);
 322 
 323     if (base != NULL) {
 324       // Check alignment constraints.
 325       assert((uintptr_t) base % alignment == 0,
 326              err_msg("Large pages returned a non-aligned address, base: "
 327                      PTR_FORMAT " alignment: " PTR_FORMAT,
 328                      base, (void*)(uintptr_t)alignment));
 329       _special = true;
 330     }
 331   }
 332 
 333   if (base == NULL) {
 334     // Failed; try to reserve regular memory below
 335     if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 336                           !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 337       if (PrintCompressedOopsMode) {
 338         tty->cr();
 339         tty->print_cr("Reserve regular memory without large pages.");
 340       }
 341     }
 342 
  343     // Optimistically assume that the OS returns an aligned base pointer.
 344     // When reserving a large address range, most OSes seem to align to at
 345     // least 64K.
 346 
 347     // If the memory was requested at a particular address, use
  348     // os::attempt_reserve_memory_at() to avoid mapping over something
 349     // important.  If available space is not detected, return NULL.
 350 
 351     if (requested_address != 0) {
 352       base = os::attempt_reserve_memory_at(size, requested_address);
 353     } else {
 354       base = os::reserve_memory(size, NULL, alignment);
 355     }
 356   }
 357   if (base == NULL) { return; }
 358 
 359   // Done
 360   _base = base;
 361   _size = size;
 362   _alignment = alignment;
 363 
 364   // Check alignment constraints
 365   if ((((size_t)base) & (alignment - 1)) != 0) {
 366     // Base not aligned, retry.
 367     release();
 368   }
 369 }
 370 
 371 void ReservedHeapSpace::try_reserve_range(char *highest_start,
 372                                           char *lowest_start,
 373                                           size_t attach_point_alignment,
 374                                           char *aligned_heap_base_min_address,
 375                                           char *upper_bound,
 376                                           size_t size,
 377                                           size_t alignment,
 378                                           bool large) {
 379   const size_t attach_range = highest_start - lowest_start;
  380   // Cap num_attempts at the number of possible attach points.
  381   // At least one attempt is possible even for a zero-sized attach range.
 382   const uint64_t num_attempts_possible = (attach_range / attach_point_alignment) + 1;
 383   const uint64_t num_attempts_to_try   = MIN2((uint64_t)HeapSearchSteps, num_attempts_possible);
 384 
 385   const size_t stepsize = (attach_range == 0) ? // Only one try.
 386     (size_t) highest_start : align_size_up(attach_range / num_attempts_to_try, attach_point_alignment);
 387 
 388   // Try attach points from top to bottom.
 389   char* attach_point = highest_start;
 390   while (attach_point >= lowest_start  &&
 391          attach_point <= highest_start &&  // Avoid wrap around.
 392          ((_base == NULL) ||
 393           (_base < aligned_heap_base_min_address || _base + size > upper_bound))) {
 394     try_reserve_heap(size, alignment, large, attach_point);
 395     attach_point -= stepsize;
 396   }
 397 }
 398 
 399 #define SIZE_64K  ((uint64_t) UCONST64(      0x10000))
 400 #define SIZE_256M ((uint64_t) UCONST64(   0x10000000))
 401 #define SIZE_32G  ((uint64_t) UCONST64(  0x800000000))
 402 
  403 // Helper for heap allocation. Returns an array of addresses
  404 // (OS-specific) that are suited for disjoint base mode. The array is
  405 // NULL-terminated.
 406 static char** get_attach_addresses_for_disjoint_mode() {
 407   static uint64_t addresses[] = {
 408      2 * SIZE_32G,
 409      3 * SIZE_32G,
 410      4 * SIZE_32G,
 411      8 * SIZE_32G,
 412     10 * SIZE_32G,
 413      1 * SIZE_64K * SIZE_32G,
 414      2 * SIZE_64K * SIZE_32G,
 415      3 * SIZE_64K * SIZE_32G,
 416      4 * SIZE_64K * SIZE_32G,
 417     16 * SIZE_64K * SIZE_32G,
 418     32 * SIZE_64K * SIZE_32G,
 419     34 * SIZE_64K * SIZE_32G,
 420     0
 421   };
 422 
  423   // Skip addresses smaller than OopEncodingHeapMax or HeapBaseMinAddress.
  424   // This assumes the array is sorted.
 425   uint i = 0;
 426   while (addresses[i] != 0 &&
 427          (addresses[i] < OopEncodingHeapMax || addresses[i] < HeapBaseMinAddress)) {
 428     i++;
 429   }
 430   uint start = i;
 431 
 432   // Avoid more steps than requested.
 433   i = 0;
 434   while (addresses[start+i] != 0) {
 435     if (i == HeapSearchSteps) {
 436       addresses[start+i] = 0;
 437       break;
 438     }
 439     i++;
 440   }
 441 
 442   return (char**) &addresses[start];
 443 }
 444 
 445 void ReservedHeapSpace::initialize_compressed_heap(const size_t size, size_t alignment, bool large) {
 446   guarantee(size + noaccess_prefix_size(alignment) <= OopEncodingHeapMax,
  447             "cannot allocate compressed oop heap for this size");
 448   guarantee(alignment == MAX2(alignment, (size_t)os::vm_page_size()), "alignment too small");
 449   assert(HeapBaseMinAddress > 0, "sanity");
 450 
 451   const size_t granularity = os::vm_allocation_granularity();
 452   assert((size & (granularity - 1)) == 0,
 453          "size not aligned to os::vm_allocation_granularity()");
 454   assert((alignment & (granularity - 1)) == 0,
 455          "alignment not aligned to os::vm_allocation_granularity()");
 456   assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
 457          "not a power of 2");
 458 
 459   // The necessary attach point alignment for generated wish addresses.
 460   // This is needed to increase the chance of attaching for mmap and shmat.
 461   const size_t os_attach_point_alignment =
 462     AIX_ONLY(SIZE_256M)  // Known shm boundary alignment.
 463     NOT_AIX(os::vm_allocation_granularity());
 464   const size_t attach_point_alignment = lcm(alignment, os_attach_point_alignment);
 465 
 466   char *aligned_heap_base_min_address = (char *)align_ptr_up((void *)HeapBaseMinAddress, alignment);
 467   size_t noaccess_prefix = ((aligned_heap_base_min_address + size) > (char*)OopEncodingHeapMax) ?
 468     noaccess_prefix_size(alignment) : 0;
 469 
 470   // Attempt to alloc at user-given address.
 471   if (!FLAG_IS_DEFAULT(HeapBaseMinAddress)) {
 472     try_reserve_heap(size + noaccess_prefix, alignment, large, aligned_heap_base_min_address);
 473     if (_base != aligned_heap_base_min_address) { // Enforce this exact address.
 474       release();
 475     }
 476   }
 477 
 478   // Keep heap at HeapBaseMinAddress.
 479   if (_base == NULL) {
 480 
 481     // Try to allocate the heap at addresses that allow efficient oop compression.
 482     // Different schemes are tried, in order of decreasing optimization potential.
 483     //
 484     // For this, try_reserve_heap() is called with the desired heap base addresses.
 485     // A call into the os layer to allocate at a given address can return memory
 486     // at a different address than requested.  Still, this might be memory at a useful
  487     // address. try_reserve_heap() always keeps the allocated memory, since the
  488     // criteria for a good heap are checked only here.
 489 
  490     // Attempt to allocate so that we can run without base and scale (32-bit unscaled compressed oops).
 491     // Give it several tries from top of range to bottom.
 492     if (aligned_heap_base_min_address + size <= (char *)UnscaledOopHeapMax) {
 493 
  494       // Calc the address range within which we try to attach (range of possible start addresses).
 495       char* const highest_start = (char *)align_ptr_down((char *)UnscaledOopHeapMax - size, attach_point_alignment);
  496       char* const lowest_start  = (char *)align_ptr_up(aligned_heap_base_min_address, attach_point_alignment);
 497       try_reserve_range(highest_start, lowest_start, attach_point_alignment,
 498                         aligned_heap_base_min_address, (char *)UnscaledOopHeapMax, size, alignment, large);
 499     }
 500 
 501     // zerobased: Attempt to allocate in the lower 32G.
  502     // But leave room for the compressed class space, which is allocated above
 503     // the heap.
 504     char *zerobased_max = (char *)OopEncodingHeapMax;
 505     // For small heaps, save some space for compressed class pointer
 506     // space so it can be decoded with no base.
 507     if (UseCompressedClassPointers && !UseSharedSpaces &&
 508         OopEncodingHeapMax <= KlassEncodingMetaspaceMax) {
 509       const size_t class_space = align_size_up(CompressedClassSpaceSize, alignment);
 510       zerobased_max = (char *)OopEncodingHeapMax - class_space;
 511     }
 512 
 513     // Give it several tries from top of range to bottom.
  514     if (aligned_heap_base_min_address + size <= zerobased_max &&    // Zerobased theoretically possible.
 515         ((_base == NULL) ||                        // No previous try succeeded.
 516          (_base + size > zerobased_max))) {        // Unscaled delivered an arbitrary address.
 517 
  518       // Calc the address range within which we try to attach (range of possible start addresses).
 519       char *const highest_start = (char *)align_ptr_down(zerobased_max - size, attach_point_alignment);
 520       // SS10 and SS12u1 cannot compile "(char *)UnscaledOopHeapMax - size" on solaris sparc 32-bit:
 521       // "Cannot use int to initialize char*." Introduce aux variable.
 522       char *unscaled_end = (char *)UnscaledOopHeapMax;
 523       unscaled_end -= size;
 524       char *lowest_start = (size < UnscaledOopHeapMax) ?
 525         MAX2(unscaled_end, aligned_heap_base_min_address) : aligned_heap_base_min_address;
 526       lowest_start  = (char *)align_ptr_up(lowest_start, attach_point_alignment);
 527       try_reserve_range(highest_start, lowest_start, attach_point_alignment,
 528                         aligned_heap_base_min_address, zerobased_max, size, alignment, large);
 529     }
 530 
 531     // Now we go for heaps with base != 0.  We need a noaccess prefix to efficiently
 532     // implement null checks.
 533     noaccess_prefix = noaccess_prefix_size(alignment);
 534 
  535     // Try to attach at addresses that are aligned to OopEncodingHeapMax. Disjoint base mode.
 536     char** addresses = get_attach_addresses_for_disjoint_mode();
 537     int i = 0;
 538     while (addresses[i] &&                                 // End of array not yet reached.
 539            ((_base == NULL) ||                             // No previous try succeeded.
 540             (_base + size >  (char *)OopEncodingHeapMax && // Not zerobased or unscaled address.
 541              !Universe::is_disjoint_heap_base_address((address)_base)))) {  // Not disjoint address.
 542       char* const attach_point = addresses[i];
 543       assert(attach_point >= aligned_heap_base_min_address, "Flag support broken");
 544       try_reserve_heap(size + noaccess_prefix, alignment, large, attach_point);
 545       i++;
 546     }
 547 
 548     // Last, desperate try without any placement.
 549     if (_base == NULL) {
 550       if (PrintCompressedOopsMode && Verbose) {
 551         tty->print("Trying to allocate at address NULL heap of size " PTR_FORMAT ".\n", (address)size + noaccess_prefix);
 552       }
 553       initialize(size + noaccess_prefix, alignment, large, NULL, false);
 554     }
 555   }
 556 }
 557 
 558 ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, bool large) : ReservedSpace() {
 559 
 560   if (size == 0) {
 561     return;
 562   }
 563 
 564   // Heap size should be aligned to alignment, too.
 565   guarantee(is_size_aligned(size, alignment), "set by caller");
 566 
 567   if (UseCompressedOops) {
 568     initialize_compressed_heap(size, alignment, large);
 569     if (_size > size) {
 570       // We allocated heap with noaccess prefix.
  571       // It can happen that we get a zerobased/unscaled heap with a noaccess
  572       // prefix if we had to try at an arbitrary address.
 573       establish_noaccess_prefix();
 574     }
 575   } else {
 576     initialize(size, alignment, large, NULL, false);
 577   }
 578 
 579   assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
 580          "area must be distinguishable from marks for mark-sweep");
 581   assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
 582          "area must be distinguishable from marks for mark-sweep");
 583 
 584   if (base() > 0) {
 585     MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
 586   }
 587 }
 588 
  589 // Reserve space for the code segment.  Same as the Java heap, except we mark
  590 // this as executable.
 591 ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
 592                                      size_t rs_align,
 593                                      bool large) :
 594   ReservedSpace(r_size, rs_align, large, /*executable*/ true) {
 595   MemTracker::record_virtual_memory_type((address)base(), mtCode);
 596 }
 597 
 598 // VirtualSpace
 599 
 600 VirtualSpace::VirtualSpace() {
 601   _low_boundary           = NULL;
 602   _high_boundary          = NULL;
 603   _low                    = NULL;
 604   _high                   = NULL;
 605   _lower_high             = NULL;
 606   _middle_high            = NULL;


1046       *p = 1;
1047     }
1048   }
1049 
1050   static void release_memory_for_test(ReservedSpace rs) {
1051     if (rs.special()) {
1052       guarantee(os::release_memory_special(rs.base(), rs.size()), "Shouldn't fail");
1053     } else {
1054       guarantee(os::release_memory(rs.base(), rs.size()), "Shouldn't fail");
1055     }
1056   }
1057 
1058   static void test_reserved_space1(size_t size, size_t alignment) {
1059     test_log("test_reserved_space1(%p)", (void*) (uintptr_t) size);
1060 
1061     assert(is_size_aligned(size, alignment), "Incorrect input parameters");
1062 
1063     ReservedSpace rs(size,          // size
1064                      alignment,     // alignment
1065                      UseLargePages, // large
1066                      (char *)NULL); // requested_address
1067 
1068     test_log(" rs.special() == %d", rs.special());
1069 
1070     assert(rs.base() != NULL, "Must be");
1071     assert(rs.size() == size, "Must be");
1072 
1073     assert(is_ptr_aligned(rs.base(), alignment), "aligned sizes should always give aligned addresses");
 1074     assert(is_size_aligned(rs.size(), alignment), "aligned sizes should always give aligned sizes");
1075 
1076     if (rs.special()) {
1077       small_page_write(rs.base(), size);
1078     }
1079 
1080     release_memory_for_test(rs);
1081   }
1082 
1083   static void test_reserved_space2(size_t size) {
1084     test_log("test_reserved_space2(%p)", (void*)(uintptr_t)size);
1085 
1086     assert(is_size_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");


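The noaccess prefix that establish_noaccess_prefix() installs exists so that decoding a narrow oop of 0 yields an address that faults, which lets the JIT keep implicit null checks even when the heap base is not NULL. A minimal POSIX demo of the idea, outside HotSpot (HotSpot itself goes through os::protect_memory(), as shown above; the names and sizes here are ours):

    #include <stddef.h>
    #include <stdio.h>
    #include <sys/mman.h>

    int main() {
      const size_t page      = 4096;        // assume 4K pages for the demo
      const size_t prefix    = page;        // a one-page "noaccess prefix"
      const size_t heap_size = 16 * page;
      char* raw = (char*) mmap(NULL, prefix + heap_size, PROT_READ | PROT_WRITE,
                               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (raw == MAP_FAILED) { perror("mmap"); return 1; }
      // Make the first page inaccessible: an access that decodes a "null" narrow
      // oop to the reservation base now traps instead of silently touching heap
      // memory.
      if (mprotect(raw, prefix, PROT_NONE) != 0) { perror("mprotect"); return 1; }
      char* heap_base = raw + prefix;       // the usable heap starts after the prefix
      heap_base[0] = 1;                     // fine
      // raw[0] = 1;                        // would SIGSEGV: the implicit null check
      return 0;
    }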