
src/share/vm/runtime/virtualspace.cpp

rev 7386 : 8064457: Introduce compressed oops mode disjoint base and improve compressed heap handling.


  26 #include "oops/markOop.hpp"
  27 #include "oops/oop.inline.hpp"
  28 #include "runtime/virtualspace.hpp"
  29 #include "services/memTracker.hpp"
  30 
  31 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
  32 
  33 // ReservedSpace
  34 
  35 // Dummy constructor
  36 ReservedSpace::ReservedSpace() : _base(NULL), _size(0), _noaccess_prefix(0),
  37     _alignment(0), _special(false), _executable(false) {
  38 }
  39 
  40 ReservedSpace::ReservedSpace(size_t size) {
  41   size_t page_size = os::page_size_for_region(size, 1);
  42   bool large_pages = page_size != (size_t)os::vm_page_size();
  43   // Don't force the alignment to be large page aligned,
  44   // since that will waste memory.
  45   size_t alignment = os::vm_allocation_granularity();
  46   initialize(size, alignment, large_pages, NULL, 0, false);
  47 }
  48 
  49 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
  50                              bool large,
  51                              char* requested_address,
  52                              const size_t noaccess_prefix) {
  53   initialize(size+noaccess_prefix, alignment, large, requested_address,
  54              noaccess_prefix, false);
  55 }
  56 
  57 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
  58                              bool large,
  59                              bool executable) {
  60   initialize(size, alignment, large, NULL, 0, executable);
  61 }
  62 
  63 // Helper method.
  64 static bool failed_to_reserve_as_requested(char* base, char* requested_address,
  65                                            const size_t size, bool special)
  66 {
  67   if (base == requested_address || requested_address == NULL)
  68     return false; // did not fail
  69 
  70   if (base != NULL) {
  71     // Different reserve address may be acceptable in other cases
  72     // but for compressed oops heap should be at requested address.
  73     assert(UseCompressedOops, "currently requested address used only for compressed oops");
  74     if (PrintCompressedOopsMode) {
  75       tty->cr();
  76       tty->print_cr("Reserved memory not at requested address: " PTR_FORMAT " vs " PTR_FORMAT, base, requested_address);
  77     }
  78     // OS ignored requested address. Try different address.
  79     if (special) {
  80       if (!os::release_memory_special(base, size)) {
  81         fatal("os::release_memory_special failed");
  82       }
  83     } else {
  84       if (!os::release_memory(base, size)) {
  85         fatal("os::release_memory failed");
  86       }
  87     }
  88   }
  89   return true;
  90 }
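
The contract the callers rely on here: a false return means "keep base" (no specific address was requested, or the OS honored it); true means the reservation missed the requested address and has already been released. The caller-side shape, condensed from initialize() below (not standalone code):

    char* base = os::attempt_reserve_memory_at(size, requested_address);
    if (failed_to_reserve_as_requested(base, requested_address, size, false)) {
      base = NULL;  // helper already released the miss; fall through and retry elsewhere
    }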
  91 
  92 void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
  93                                char* requested_address,
  94                                const size_t noaccess_prefix,
  95                                bool executable) {
  96   const size_t granularity = os::vm_allocation_granularity();
  97   assert((size & (granularity - 1)) == 0,
  98          "size not aligned to os::vm_allocation_granularity()");
  99   assert((alignment & (granularity - 1)) == 0,
 100          "alignment not aligned to os::vm_allocation_granularity()");
 101   assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
 102          "not a power of 2");
 103 
 104   alignment = MAX2(alignment, (size_t)os::vm_page_size());
 105 
 106   // Assert that if noaccess_prefix is used, it is the same as alignment.
 107   assert(noaccess_prefix == 0 ||
 108          noaccess_prefix == alignment, "noaccess prefix wrong");
 109 
 110   _base = NULL;
 111   _size = 0;
 112   _special = false;
 113   _executable = executable;
 114   _alignment = 0;
 115   _noaccess_prefix = 0;
 116   if (size == 0) {
 117     return;
 118   }
 119 
 120   // If OS doesn't support demand paging for large page memory, we need
 121   // to use reserve_memory_special() to reserve and pin the entire region.
 122   bool special = large && !os::can_commit_large_page_memory();
 123   char* base = NULL;
 124 
 125   if (requested_address != 0) {
 126     requested_address -= noaccess_prefix; // adjust requested address
 127     assert(requested_address != NULL, "huge noaccess prefix?");
 128   }
 129 
 130   if (special) {
 131 
 132     base = os::reserve_memory_special(size, alignment, requested_address, executable);
 133 
 134     if (base != NULL) {
 135       if (failed_to_reserve_as_requested(base, requested_address, size, true)) {
 136         // OS ignored requested address. Try different address.
 137         return;
 138       }
 139       // Check alignment constraints.
 140       assert((uintptr_t) base % alignment == 0,
 141              err_msg("Large pages returned a non-aligned address, base: "
 142                  PTR_FORMAT " alignment: " PTR_FORMAT,
 143                  base, (void*)(uintptr_t)alignment));
 144       _special = true;
 145     } else {
 146       // failed; try to reserve regular memory below
 147       if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 148                             !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 149         if (PrintCompressedOopsMode) {


 159     // When reserving a large address range, most OSes seem to align to at
 160     // least 64K.
 161 
 162     // If the memory was requested at a particular address, use
 163     // os::attempt_reserve_memory_at() to avoid mapping over something
 164     // important.  If available space is not detected, return NULL.
 165 
 166     if (requested_address != 0) {
 167       base = os::attempt_reserve_memory_at(size, requested_address);
 168       if (failed_to_reserve_as_requested(base, requested_address, size, false)) {
 169         // OS ignored requested address. Try different address.
 170         base = NULL;
 171       }
 172     } else {
 173       base = os::reserve_memory(size, NULL, alignment);
 174     }
 175 
 176     if (base == NULL) return;
 177 
 178     // Check alignment constraints
 179     if ((((size_t)base + noaccess_prefix) & (alignment - 1)) != 0) {
 180       // Base not aligned, retry
 181       if (!os::release_memory(base, size)) fatal("os::release_memory failed");
 182       // Make sure that size is aligned
 183       size = align_size_up(size, alignment);
 184       base = os::reserve_memory_aligned(size, alignment);
 185 
 186       if (requested_address != 0 &&
 187           failed_to_reserve_as_requested(base, requested_address, size, false)) {
 188         // As a result of the alignment constraints, the allocated base differs
 189         // from the requested address. Return to the caller, who can
 190         // take remedial action (like try again without a requested address).
 191         assert(_base == NULL, "should be");
 192         return;
 193       }
 194     }
 195   }
 196   // Done
 197   _base = base;
 198   _size = size;
 199   _alignment = alignment;
 200   _noaccess_prefix = noaccess_prefix;
 201 
 202   // Assert that if noaccess_prefix is used, it is the same as alignment.
 203   assert(noaccess_prefix == 0 ||
 204          noaccess_prefix == _alignment, "noaccess prefix wrong");
 205 
 206   assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
 207          "area must be distinguishable from marks for mark-sweep");
 208   assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
 209          "area must be distinguishable from marks for mark-sweep");
 210 }
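
About the alignment test at line 179: since alignment is a power of two (asserted at line 101), x & (alignment - 1) equals x % alignment, and the value tested is base + noaccess_prefix because the heap proper only starts after the prefix. A standalone sketch of the same check (helper name hypothetical):

    #include <assert.h>
    #include <stdint.h>

    // x & (align - 1) == x % align holds only when align is a power of two.
    static inline bool is_aligned_pow2(uintptr_t x, uintptr_t align) {
      assert((align & (align - 1)) == 0 && "align must be a power of two");
      return (x & (align - 1)) == 0;
    }

    // Example: base 0x7f0000010000 with noaccess_prefix 0x10000 gives a heap
    // start of 0x7f0000020000, which is aligned for alignment 0x20000 even
    // though base itself is not.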
 211 
 212 
 213 ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
 214                              bool special, bool executable) {
 215   assert((size % os::vm_allocation_granularity()) == 0,
 216          "size not allocation aligned");
 217   _base = base;
 218   _size = size;
 219   _alignment = alignment;
 220   _noaccess_prefix = 0;
 221   _special = special;
 222   _executable = executable;
 223 }
 224 


 264 }
 265 
 266 
 267 void ReservedSpace::release() {
 268   if (is_reserved()) {
 269     char *real_base = _base - _noaccess_prefix;
 270     const size_t real_size = _size + _noaccess_prefix;
 271     if (special()) {
 272       os::release_memory_special(real_base, real_size);
 273     } else{
 274       os::release_memory(real_base, real_size);
 275     }
 276     _base = NULL;
 277     _size = 0;
 278     _noaccess_prefix = 0;
 279     _special = false;
 280     _executable = false;
 281   }
 282 }
 283 
 284 void ReservedSpace::protect_noaccess_prefix(const size_t size) {
 285   assert( (_noaccess_prefix != 0) == (UseCompressedOops && _base != NULL &&
 286                                       (Universe::narrow_oop_base() != NULL) &&
 287                                       Universe::narrow_oop_use_implicit_null_checks()),
 288          "noaccess_prefix should be used only with non zero based compressed oops");
 289 
 290   // If there is no noaccess prefix, return.
 291   if (_noaccess_prefix == 0) return;
 292 
 293   assert(_noaccess_prefix >= (size_t)os::vm_page_size(),
 294          "must be at least page size big");
 295 
 296   // Protect memory at the base of the allocated region.
 297   // If special, the page was committed (only matters on windows)
 298   if (!os::protect_memory(_base, _noaccess_prefix, os::MEM_PROT_NONE,
 299                           _special)) {
 300     fatal("cannot protect protection page");
 301   }
 302   if (PrintCompressedOopsMode) {
 303     tty->cr();
 304     tty->print_cr("Protected page at the reserved heap base: " PTR_FORMAT " / " INTX_FORMAT " bytes", _base, _noaccess_prefix);
 305   }
 306 
 307   _base += _noaccess_prefix;
 308   _size -= _noaccess_prefix;
 309   assert((size == _size) && ((uintptr_t)_base % _alignment == 0),
 310          "must be exactly of required size and alignment");
 311 }
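
Background on why this page is protected: with a non-zero narrow-oop base, compiled code may omit explicit null checks, because a compressed oop with value 0 decodes to the heap base itself, i.e. into the page protected above; the resulting access violation is then turned into the null check. A minimal sketch of the decode arithmetic (names hypothetical, not HotSpot's actual decode routine):

    #include <stdint.h>

    // Hypothetical decode of a compressed (narrow) oop against a non-zero base.
    static inline char* decode_narrow_oop(uint32_t narrow, char* heap_base, int shift) {
      // narrow == 0 yields heap_base, which lies inside the protected
      // noaccess prefix, so a field access through it faults rather than
      // silently reading heap memory.
      return heap_base + ((uintptr_t)narrow << shift);
    }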
 312 
 313 ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment,
 314                                      bool large, char* requested_address) :
 315   ReservedSpace(size, alignment, large,
 316                 requested_address,
 317                 (UseCompressedOops && (Universe::narrow_oop_base() != NULL) &&
 318                  Universe::narrow_oop_use_implicit_null_checks()) ?
 319                   lcm(os::vm_page_size(), alignment) : 0) {
 320   if (base() > 0) {
 321     MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
 322   }
 323 
 324   // Only reserved space for the java heap should have a noaccess_prefix
 325   // if using compressed oops.
 326   protect_noaccess_prefix(size);
 327 }
 328 
 329 // Reserve space for code segment.  Same as Java heap only we mark this as
 330 // executable.
 331 ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
 332                                      size_t rs_align,
 333                                      bool large) :
 334   ReservedSpace(r_size, rs_align, large, /*executable*/ true) {
 335   MemTracker::record_virtual_memory_type((address)base(), mtCode);
 336 }
 337 
 338 // VirtualSpace
 339 
 340 VirtualSpace::VirtualSpace() {
 341   _low_boundary           = NULL;
 342   _high_boundary          = NULL;
 343   _low                    = NULL;
 344   _high                   = NULL;
 345   _lower_high             = NULL;
 346   _middle_high            = NULL;


 786       *p = 1;
 787     }
 788   }
 789 
 790   static void release_memory_for_test(ReservedSpace rs) {
 791     if (rs.special()) {
 792       guarantee(os::release_memory_special(rs.base(), rs.size()), "Shouldn't fail");
 793     } else {
 794       guarantee(os::release_memory(rs.base(), rs.size()), "Shouldn't fail");
 795     }
 796   }
 797 
 798   static void test_reserved_space1(size_t size, size_t alignment) {
 799     test_log("test_reserved_space1(%p)", (void*) (uintptr_t) size);
 800 
 801     assert(is_size_aligned(size, alignment), "Incorrect input parameters");
 802 
 803     ReservedSpace rs(size,          // size
 804                      alignment,     // alignment
 805                      UseLargePages, // large
 806                      NULL,          // requested_address
 807                      0);            // noaccess_prefix
 808 
 809     test_log(" rs.special() == %d", rs.special());
 810 
 811     assert(rs.base() != NULL, "Must be");
 812     assert(rs.size() == size, "Must be");
 813 
 814     assert(is_ptr_aligned(rs.base(), alignment), "aligned sizes should always give aligned addresses");
 815     assert(is_size_aligned(rs.size(), alignment), "aligned sizes should always give aligned addresses");
 816 
 817     if (rs.special()) {
 818       small_page_write(rs.base(), size);
 819     }
 820 
 821     release_memory_for_test(rs);
 822   }
 823 
 824   static void test_reserved_space2(size_t size) {
 825     test_log("test_reserved_space2(%p)", (void*)(uintptr_t)size);
 826 
 827     assert(is_size_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");

src/share/vm/runtime/virtualspace.cpp (new version, rev 7386)

  26 #include "oops/markOop.hpp"
  27 #include "oops/oop.inline.hpp"
  28 #include "runtime/virtualspace.hpp"
  29 #include "services/memTracker.hpp"
  30 
  31 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
  32 
  33 // ReservedSpace
  34 
  35 // Dummy constructor
  36 ReservedSpace::ReservedSpace() : _base(NULL), _size(0), _noaccess_prefix(0),
  37     _alignment(0), _special(false), _executable(false) {
  38 }
  39 
  40 ReservedSpace::ReservedSpace(size_t size) {
  41   size_t page_size = os::page_size_for_region(size, 1);
  42   bool large_pages = page_size != (size_t)os::vm_page_size();
  43   // Don't force the alignment to be large page aligned,
  44   // since that will waste memory.
  45   size_t alignment = os::vm_allocation_granularity();
  46   initialize(size, alignment, large_pages, NULL, false);
  47 }
  48 
  49 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
  50                              bool large,
  51                              char* requested_address) {
  52   initialize(size, alignment, large, requested_address, false);
  53 }
  54 
  55 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
  56                              bool large,
  57                              bool executable) {
  58   initialize(size, alignment, large, NULL, executable);
  59 }
  60 
  61 // Helper method.
  62 static bool failed_to_reserve_as_requested(char* base, char* requested_address,
  63                                            const size_t size, bool special)
  64 {
  65   if (base == requested_address || requested_address == NULL)
  66     return false; // did not fail
  67 
  68   if (base != NULL) {
  69     // Different reserve address may be acceptable in other cases
  70     // but for compressed oops heap should be at requested address.
  71     assert(UseCompressedOops, "currently requested address used only for compressed oops");
  72     if (PrintCompressedOopsMode) {
  73       tty->cr();
  74       tty->print_cr("Reserved memory not at requested address: " PTR_FORMAT " vs " PTR_FORMAT, base, requested_address);
  75     }
  76     // OS ignored requested address. Try different address.
  77     if (special) {
  78       if (!os::release_memory_special(base, size)) {
  79         fatal("os::release_memory_special failed");
  80       }
  81     } else {
  82       if (!os::release_memory(base, size)) {
  83         fatal("os::release_memory failed");
  84       }
  85     }
  86   }
  87   return true;
  88 }
  89 
  90 void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
  91                                char* requested_address,
  92                                bool executable) {
  93   const size_t granularity = os::vm_allocation_granularity();
  94   assert((size & (granularity - 1)) == 0,
  95          "size not aligned to os::vm_allocation_granularity()");
  96   assert((alignment & (granularity - 1)) == 0,
  97          "alignment not aligned to os::vm_allocation_granularity()");
  98   assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
  99          "not a power of 2");
 100 
 101   alignment = MAX2(alignment, (size_t)os::vm_page_size());
 102 
 103   _base = NULL;
 104   _size = 0;
 105   _special = false;
 106   _executable = executable;
 107   _alignment = 0;
 108   _noaccess_prefix = 0;
 109   if (size == 0) {
 110     return;
 111   }
 112 
 113   // If OS doesn't support demand paging for large page memory, we need
 114   // to use reserve_memory_special() to reserve and pin the entire region.
 115   bool special = large && !os::can_commit_large_page_memory();
 116   char* base = NULL;
 117 
 118   if (special) {
 119 
 120     base = os::reserve_memory_special(size, alignment, requested_address, executable);
 121 
 122     if (base != NULL) {
 123       if (failed_to_reserve_as_requested(base, requested_address, size, true)) {
 124         // OS ignored requested address. Try different address.
 125         return;
 126       }
 127       // Check alignment constraints.
 128       assert((uintptr_t) base % alignment == 0,
 129              err_msg("Large pages returned a non-aligned address, base: "
 130                  PTR_FORMAT " alignment: " PTR_FORMAT,
 131                  base, (void*)(uintptr_t)alignment));
 132       _special = true;
 133     } else {
 134       // failed; try to reserve regular memory below
 135       if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 136                             !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 137         if (PrintCompressedOopsMode) {


 147     // When reserving a large address range, most OSes seem to align to at
 148     // least 64K.
 149 
 150     // If the memory was requested at a particular address, use
 151     // os::attempt_reserve_memory_at() to avoid mapping over something
 152     // important.  If available space is not detected, return NULL.
 153 
 154     if (requested_address != 0) {
 155       base = os::attempt_reserve_memory_at(size, requested_address);
 156       if (failed_to_reserve_as_requested(base, requested_address, size, false)) {
 157         // OS ignored requested address. Try different address.
 158         base = NULL;
 159       }
 160     } else {
 161       base = os::reserve_memory(size, NULL, alignment);
 162     }
 163 
 164     if (base == NULL) return;
 165 
 166     // Check alignment constraints
 167     if ((((size_t)base) & (alignment - 1)) != 0) {
 168       // Base not aligned, retry
 169       if (!os::release_memory(base, size)) fatal("os::release_memory failed");
 170       // Make sure that size is aligned
 171       size = align_size_up(size, alignment);
 172       base = os::reserve_memory_aligned(size, alignment);
 173 
 174       if (requested_address != 0 &&
 175           failed_to_reserve_as_requested(base, requested_address, size, false)) {
 176         // As a result of the alignment constraints, the allocated base differs
 177         // from the requested address. Return to the caller, who can
 178         // take remedial action (like try again without a requested address).
 179         assert(_base == NULL, "should be");
 180         return;
 181       }
 182     }
 183   }
 184   // Done
 185   _base = base;
 186   _size = size;
 187   _alignment = alignment;
 188 
 189   assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
 190          "area must be distinguishable from marks for mark-sweep");
 191   assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
 192          "area must be distinguishable from marks for mark-sweep");
 193 }
 194 
 195 
 196 ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
 197                              bool special, bool executable) {
 198   assert((size % os::vm_allocation_granularity()) == 0,
 199          "size not allocation aligned");
 200   _base = base;
 201   _size = size;
 202   _alignment = alignment;
 203   _noaccess_prefix = 0;
 204   _special = special;
 205   _executable = executable;
 206 }
 207 


 247 }
 248 
 249 
 250 void ReservedSpace::release() {
 251   if (is_reserved()) {
 252     char *real_base = _base - _noaccess_prefix;
 253     const size_t real_size = _size + _noaccess_prefix;
 254     if (special()) {
 255       os::release_memory_special(real_base, real_size);
 256     } else{
 257       os::release_memory(real_base, real_size);
 258     }
 259     _base = NULL;
 260     _size = 0;
 261     _noaccess_prefix = 0;
 262     _special = false;
 263     _executable = false;
 264   }
 265 }
 266 
 267 static size_t noaccess_prefix_size(size_t alignment) {
 268   return lcm(os::vm_page_size(), alignment);
 269 }
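
Taking the lcm makes the prefix both a whole number of pages (so os::protect_memory() can cover it exactly) and a multiple of the heap alignment (so stripping it leaves _base aligned). A self-contained check with assumed values of 4 KiB pages and 2 MiB alignment:

    #include <stdio.h>

    static unsigned long gcd(unsigned long a, unsigned long b) {
      while (b != 0) { unsigned long t = a % b; a = b; b = t; }
      return a;
    }
    static unsigned long lcm(unsigned long a, unsigned long b) {
      return a / gcd(a, b) * b;
    }

    int main() {
      // 2 MiB is already a multiple of 4 KiB, so the prefix is one alignment unit.
      printf("%lu\n", lcm(4096UL, 2UL * 1024 * 1024));  // prints 2097152
      return 0;
    }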
 270 
 271 void ReservedSpace::establish_noaccess_prefix() {
 272   assert(_alignment >= (size_t)os::vm_page_size(), "must be at least page size big");
 273 
 274   // ...
 275   _noaccess_prefix = noaccess_prefix_size(_alignment);
 276 
 277   if (true
 278       WIN64_ONLY(&& !UseLargePages)
 279       AIX_ONLY(&& os::vm_page_size() != SIZE_64K)) {
 280     // Protect memory at the base of the allocated region.
 281     // If special, the page was committed (only matters on windows)
 282     if (!os::protect_memory(_base, _noaccess_prefix, os::MEM_PROT_NONE, _special)) {
 283       fatal("cannot protect protection page");
 284     }
 285     if (PrintCompressedOopsMode) {
 286       tty->cr();
 287       tty->print_cr("Protected page at the reserved heap base: " PTR_FORMAT " / " INTX_FORMAT " bytes", _base, _noaccess_prefix);
 288     }
 289     assert(Universe::narrow_oop_use_implicit_null_checks() == true, "not initialized?");
 290   } else {
 291     Universe::set_narrow_oop_use_implicit_null_checks(false);
 292   }
 293 
 294   _base += _noaccess_prefix;
 295   _size -= _noaccess_prefix;
 296   assert(((uintptr_t)_base % _alignment == 0), "must be exactly of required alignment");
 297 }
 298 
 299 
 300 // Tries to allocate memory of size 'size' at address requested_address with alignment 'alignment'.
 301 // Does not check whether the reserved memory actually is at requested_address, as the memory returned
 302 // might still fulfill the wishes of the caller.
 303 // Ensures the memory is aligned to 'alignment'.
 304 // NOTE: If ReservedHeapSpace already points to some reserved memory, this is freed first.
 305 void ReservedHeapSpace::try_reserve_heap(size_t size, size_t alignment, bool large, char* requested_address) {
 306   if (_base != NULL) {
 307     // We tried before, but we didn't like the address delivered.
 308     release();
 309   }
 310 
 311   // If OS doesn't support demand paging for large page memory, we need
 312   // to use reserve_memory_special() to reserve and pin the entire region.
 313   bool special = large && !os::can_commit_large_page_memory();
 314   char* base = NULL;
 315 
 316   if (PrintCompressedOopsMode && Verbose) {
 317     tty->print("Trying to allocate at address " PTR_FORMAT " size " PTR_FORMAT ".\n",
 318                requested_address, (address)size);
 319   }
 320 
 321   if (special) {
 322     base = os::reserve_memory_special(size, alignment, requested_address, false);
 323 
 324     if (base != NULL) {
 325       // Check alignment constraints.
 326       assert((uintptr_t) base % alignment == 0,
 327              err_msg("Large pages returned a non-aligned address, base: "
 328                      PTR_FORMAT " alignment: " PTR_FORMAT,
 329                      base, (void*)(uintptr_t)alignment));
 330       _special = true;
 331     }
 332   }
 333 
 334   if (!base) {
 335     // Failed; try to reserve regular memory below
 336     if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 337                           !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 338       if (PrintCompressedOopsMode) {
 339         tty->cr();
 340         tty->print_cr("Reserve regular memory without large pages.");
 341       }
 342     }
 343 
 344     // Optimistically assume that the OS returns an aligned base pointer.
 345     // When reserving a large address range, most OSes seem to align to at
 346     // least 64K.
 347 
 348     // If the memory was requested at a particular address, use
 349     // os::attempt_reserve_memory_at() to avoid over mapping something
 350     // important.  If available space is not detected, return NULL.
 351 
 352     if (requested_address != 0) {
 353       base = os::attempt_reserve_memory_at(size, requested_address);
 354     } else {
 355       base = os::reserve_memory(size, NULL, alignment);
 356     }
 357   }
 358   if (base == NULL) return;
 359 
 360   // Done
 361   _base = base;
 362   _size = size;
 363   _alignment = alignment;
 364 
 365   // Check alignment constraints
 366   if ((((size_t)base) & (alignment - 1)) != 0) {
 367     // Base not aligned, retry.
 368     release();
 369     return;
 370   }
 371 }
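
Note that the method keeps a reservation even when it missed the wish address; the callers below loop while the result is unsatisfactory, and the release() at the top of this method frees the rejected attempt on the next call. Condensed shape of that loop (not standalone code; limit stands in for e.g. UnscaledOopHeapMax):

    char* attach_point = highest_start;
    while (attach_point >= lowest_start && attach_point <= highest_start &&
           (_base == NULL || _base + size > limit)) {  // reject out-of-range results
      try_reserve_heap(size, alignment, large, attach_point);
      attach_point -= stepsize;
    }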
 372 
 373 void ReservedHeapSpace::initialize_compressed_heap(size_t size, size_t alignment, bool large) {
 374   guarantee(size + noaccess_prefix_size(alignment) <= OopEncodingHeapMax,
 375             "cannot allocate compressed oop heap for this size");
 376   guarantee(alignment == MAX2(alignment, (size_t)os::vm_page_size()), "alignment too small");
 377   assert(HeapBaseMinAddress > 0, "sanity");
 378 
 379   const size_t granularity = os::vm_allocation_granularity();
 380   assert((size & (granularity - 1)) == 0,
 381          "size not aligned to os::vm_allocation_granularity()");
 382   assert((alignment & (granularity - 1)) == 0,
 383          "alignment not aligned to os::vm_allocation_granularity()");
 384   assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
 385          "not a power of 2");
 386 
 387   // The necessary attach point alignment for generated wish addresses.
 388   // This is needed to increase the chance of attaching for mmap and shmat.
 389   const size_t os_attach_point_alignment =
 390     AIX_ONLY(SIZE_256M)  // Known shm boundary alignment.
 391     NOT_AIX(os::vm_allocation_granularity());
 392   const size_t attach_point_alignment = lcm(alignment, os_attach_point_alignment);
 393 
 394   guarantee(HeapSearchSteps > 0, "Don't set HeapSearchSteps to 0");
 395   const uint64_t num_attempts = HeapSearchSteps;
 396 
 397   char *aligned_HBMA = (char *)align_ptr_up((void *)HeapBaseMinAddress, alignment);
 398   size_t noaccess_prefix = ((aligned_HBMA + size) > (char*)OopEncodingHeapMax) ? noaccess_prefix_size(alignment) : 0;
 399 
 400   // Attempt to alloc at user-given address.
 401   if (!FLAG_IS_DEFAULT(HeapBaseMinAddress)) {
 402     if (PrintCompressedOopsMode && Verbose) {
 403       tty->print(" == H E A P B A S E M I N A D D R E S S ==\n");
 404     }
 405     try_reserve_heap(size + noaccess_prefix, alignment, large, aligned_HBMA);
 406     if (_base != aligned_HBMA) { // Enforce this exact address.
 407       release();
 408     }
 409   }
 410 
 411   // Keep heap at HeapBaseMinAddress.
 412   if (!_base) {
 413 
 414     if (PrintCompressedOopsMode && Verbose) {
 415       tty->print(" == U N S C A L E D ==\n");
 416     }
 417 
 418     // Attempt to allocate so that we can run without base and scale (32-bit unscaled compressed oops).
 419     // Give it several tries from top of range to bottom.
 420     if (aligned_HBMA + size <= (char *)UnscaledOopHeapMax) {
 421 
 422       // Calc address range within which we try to attach (range of possible start addresses).
 423       char* const highest_start = (char *)align_ptr_down((char *)UnscaledOopHeapMax - size, attach_point_alignment);
 424       char* const lowest_start  = (char *)align_ptr_up  (        aligned_HBMA             , attach_point_alignment);
 425       const size_t attach_range = highest_start - lowest_start;
 426 
 427       // Cap num_attempts at possible number.
 428       const uint64_t num_attempts_possible =
 429         (attach_range / attach_point_alignment) + 1; // At least one is possible even for 0 sized attach range.
 430       const uint64_t num_attempts_to_try = MIN2(num_attempts, num_attempts_possible);
 431 
 432       const size_t stepsize = align_size_up(attach_range / num_attempts_to_try, attach_point_alignment);
 433 
 434       // Try attach points from top to bottom.
 435       char* attach_point = highest_start;
 436       while (attach_point >= lowest_start  &&
 437              attach_point <= highest_start &&  // Avoid wrap around.
 438              (!_base || _base < aligned_HBMA || _base + size > (char *)UnscaledOopHeapMax)) {
 439         try_reserve_heap(size, alignment, large, attach_point);
 440         attach_point -= stepsize;
 441       }
 442 
 443     }
 444 
 445     if (PrintCompressedOopsMode && Verbose) {
 446       tty->print(" == Z E R O B A S E D ==\n");
 447     }
 448 
 449     // zerobased: Attempt to allocate in the lower 32G.
 450     // But leave room for the compressed class pointers, which is allocated above
 451     // the heap.
 452     char *zerobased_max = (char *)OopEncodingHeapMax;
 453     // For small heaps, save some space for compressed class pointer
 454     // space so it can be decoded with no base.
 455     if (UseCompressedClassPointers && !UseSharedSpaces &&
 456         OopEncodingHeapMax <= KlassEncodingMetaspaceMax) {
 457       const size_t class_space = align_size_up(CompressedClassSpaceSize, alignment);
 458       zerobased_max = (char *)OopEncodingHeapMax - class_space;
 459     }
 460 
 461     // Give it several tries from top of range to bottom.
 462     if (aligned_HBMA + size <= zerobased_max &&       // Zerobased theoretically possible.
 463         (!_base ||                                    // No previous try succeeded.
 464          (_base && _base + size > zerobased_max))) {  // Unscaled delivered an arbitrary address.
 465 
 466       // Calc address range within which we try to attach (range of possible start addresses).
 467       char *const highest_start = (char *)align_ptr_down(zerobased_max - size, attach_point_alignment);
 468       // SS10 and SS12u1 cannot compile "(char *)UnscaledOopHeapMax - size" on solaris sparc 32-bit:
 469       // "Cannot use int to initialize char*." Introduce aux variable.
 470       char *unscaled_end = (char *)UnscaledOopHeapMax;
 471       unscaled_end -= size;
 472       char *lowest_start = (size < UnscaledOopHeapMax) ? MAX2(unscaled_end, aligned_HBMA) : aligned_HBMA;
 473       lowest_start  = (char *)align_ptr_up(lowest_start, attach_point_alignment);
 474       const size_t attach_range = highest_start - lowest_start;
 475 
 476       // Cap num_attempts at possible number.
 477       const uint64_t num_attempts_possible =
 478         (attach_range / attach_point_alignment) + 1; // At least one is possible even for 0 sized attach range.
 479       const uint64_t num_attempts_to_try = MIN2(num_attempts, num_attempts_possible);
 480 
 481       const size_t stepsize = align_size_up(attach_range / num_attempts_to_try, attach_point_alignment);
 482 
 483       // Try attach points from top to bottom.
 484       char* attach_point = highest_start;
 485       while (attach_point >= lowest_start  &&
 486              attach_point <= highest_start &&  // Avoid wrap around.
 487              (!_base || _base < aligned_HBMA || _base + size > zerobased_max)) {
 488         try_reserve_heap(size, alignment, large, attach_point);
 489         attach_point -= stepsize;
 490       }
 491 
 492     }
 493 
 494     if (PrintCompressedOopsMode && Verbose) {
 495       tty->print(" == D I S J O I N T B A S E ==\n");
 496     }
 497 
 498     // Now we go for heaps with base != 0.  We need a noaccess prefix to efficiently
 499     // implement null checks.
 500     noaccess_prefix = noaccess_prefix_size(alignment);
 501 
 502     // Try to attach at addresses that are aligned to OopEncodingHeapMax. Disjoint-base mode.
 503     char** addresses = Universe::get_attach_addresses_for_disjoint_mode();
 504     int i = 0;
 505     while (addresses[i] &&
 506            (!_base ||
 507             (_base && _base + size > (char *)OopEncodingHeapMax &&
 508              !Universe::is_disjoint_heap_base_address((address)_base)))) {
 509       char* const attach_point = addresses[i];
 510       assert(attach_point >= aligned_HBMA, "Flag support broken");
 511       try_reserve_heap(size + noaccess_prefix, alignment, large, attach_point);
 512       i++;
 513     }
 514 
 515     if (PrintCompressedOopsMode && Verbose) {
 516       tty->print(" == H E A P B A S E D ==\n");
 517     }
 518 
 519     // Last, desperate try without any placement.
 520     if (!_base) {
 521       if (PrintCompressedOopsMode && Verbose) {
 522         tty->print("Trying to allocate at address NULL, size " PTR_FORMAT ".\n", (address)size);
 523       }
 524       initialize(size + noaccess_prefix, alignment, large, NULL, false);
 525     }
 526   }
 527 
 528   assert(!_base || markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
 529          "area must be distinguishable from marks for mark-sweep");
 530   assert(!_base || markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
 531          "area must be distinguishable from marks for mark-sweep");
 532 }
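
To make the attach-point stepping concrete, a worked example with assumed numbers: a 1 GiB heap, aligned_HBMA at 2 GiB, 64 KiB attach-point alignment and HeapSearchSteps = 3 give highest_start = 4 GiB - 1 GiB = 3 GiB, attach_range = 1 GiB and stepsize = align_size_up(1 GiB / 3, 64 KiB), about 341.4 MiB, so the loop probes 3 GiB, ~2.67 GiB and ~2.33 GiB before the next point would fall below lowest_start:

    #include <stdint.h>
    #include <stdio.h>

    int main() {
      const uint64_t align   = 64 * 1024;            // assumed attach_point_alignment
      const uint64_t size    = 1ULL << 30;           // assumed heap size, 1 GiB
      const uint64_t lowest  = 2ULL << 30;           // assumed aligned_HBMA
      const uint64_t highest = (4ULL << 30) - size;  // UnscaledOopHeapMax - size
      const uint64_t steps   = 3;                    // assumed HeapSearchSteps
      // align_size_up((highest - lowest) / steps, align):
      uint64_t stepsize = ((highest - lowest) / steps + align - 1) & ~(align - 1);
      for (uint64_t p = highest; p >= lowest && p <= highest; p -= stepsize) {
        printf("probe at 0x%llx\n", (unsigned long long)p);  // three probes, top down
      }
      return 0;
    }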
 533 
 534 ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, bool large) : ReservedSpace() {
 535 
 536   if (size == 0) {
 537     return;
 538   }
 539 
 540   // Heap size should be aligned to alignment, too.
 541   guarantee(is_size_aligned(size, alignment), "set by caller");
 542 
 543   if (UseCompressedOops) {
 544     initialize_compressed_heap(size, alignment, large);
 545     if (base() && base() + size > (char *)OopEncodingHeapMax) {
 546       establish_noaccess_prefix();
 547     }
 548 
 549   } else {
 550     initialize(size, alignment, large, NULL, false);
 551   }
 552 
 553   if (base() > 0) {
 554     MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
 555   }
 556 }
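
For orientation, the four outcomes the search above can end in, in the order initialize_compressed_heap() tries them (informal summary, not HotSpot source; n is the narrow oop, s the oop shift):

    // unscaled:      oop = n                heap ends below 4 GiB (base = 0, s = 0)
    // zero based:    oop = n << s           heap ends below OopEncodingHeapMax (base = 0)
    // disjoint base: oop = base | (n << s)  base bits don't overlap the shifted oop bits
    // heap based:    oop = base + (n << s)  arbitrary non-zero base
    // The two non-zero-base modes rely on the noaccess prefix established
    // above for implicit null checks.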
 557 
 558 // Reserve space for code segment.  Same as Java heap only we mark this as
 559 // executable.
 560 ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
 561                                      size_t rs_align,
 562                                      bool large) :
 563   ReservedSpace(r_size, rs_align, large, /*executable*/ true) {
 564   MemTracker::record_virtual_memory_type((address)base(), mtCode);
 565 }
 566 
 567 // VirtualSpace
 568 
 569 VirtualSpace::VirtualSpace() {
 570   _low_boundary           = NULL;
 571   _high_boundary          = NULL;
 572   _low                    = NULL;
 573   _high                   = NULL;
 574   _lower_high             = NULL;
 575   _middle_high            = NULL;


1015       *p = 1;
1016     }
1017   }
1018 
1019   static void release_memory_for_test(ReservedSpace rs) {
1020     if (rs.special()) {
1021       guarantee(os::release_memory_special(rs.base(), rs.size()), "Shouldn't fail");
1022     } else {
1023       guarantee(os::release_memory(rs.base(), rs.size()), "Shouldn't fail");
1024     }
1025   }
1026 
1027   static void test_reserved_space1(size_t size, size_t alignment) {
1028     test_log("test_reserved_space1(%p)", (void*) (uintptr_t) size);
1029 
1030     assert(is_size_aligned(size, alignment), "Incorrect input parameters");
1031 
1032     ReservedSpace rs(size,          // size
1033                      alignment,     // alignment
1034                      UseLargePages, // large
1035                      (char *)NULL); // requested_address
1036 
1037     test_log(" rs.special() == %d", rs.special());
1038 
1039     assert(rs.base() != NULL, "Must be");
1040     assert(rs.size() == size, "Must be");
1041 
1042     assert(is_ptr_aligned(rs.base(), alignment), "aligned sizes should always give aligned addresses");
1043     assert(is_size_aligned(rs.size(), alignment), "aligned sizes should always give aligned addresses");
1044 
1045     if (rs.special()) {
1046       small_page_write(rs.base(), size);
1047     }
1048 
1049     release_memory_for_test(rs);
1050   }
1051 
1052   static void test_reserved_space2(size_t size) {
1053     test_log("test_reserved_space2(%p)", (void*)(uintptr_t)size);
1054 
1055     assert(is_size_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");

