src/hotspot/share/memory/virtualspace.cpp

   1 /*
   2  * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "logging/log.hpp"
  27 #include "memory/resourceArea.hpp"
  28 #include "memory/virtualspace.hpp"
  29 #include "oops/markOop.hpp"
  30 #include "oops/oop.inline.hpp"
  31 #include "services/memTracker.hpp"
  32 #include "utilities/align.hpp"
  33 
  34 // ReservedSpace
  35 
  36 // Dummy constructor
  37 ReservedSpace::ReservedSpace() : _base(NULL), _size(0), _noaccess_prefix(0),
  38     _alignment(0), _special(false), _executable(false) {
  39 }
  40 
  41 ReservedSpace::ReservedSpace(size_t size, size_t preferred_page_size) {
  42   bool has_preferred_page_size = preferred_page_size != 0;
  43   // Want to use large pages where possible and pad with small pages.
  44   size_t page_size = has_preferred_page_size ? preferred_page_size : os::page_size_for_region_unaligned(size, 1);
  45   bool large_pages = page_size != (size_t)os::vm_page_size();
  46   size_t alignment;
  47   if (large_pages && has_preferred_page_size) {
  48     alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
  49     // ReservedSpace initialization requires size to be aligned to the given
  50     // alignment. Align the size up.
  51     size = align_up(size, alignment);
  52   } else {
  53     // Don't force the alignment to be large page aligned,
  54     // since that will waste memory.
  55     alignment = os::vm_allocation_granularity();
  56   }
  57   initialize(size, alignment, large_pages, NULL, false);
  58 }
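// Illustrative example (hypothetical numbers): with preferred_page_size = 2M
// and size = 5M, page_size is 2M and large_pages is true, so alignment
// becomes MAX2(2M, os::vm_allocation_granularity()) and size is aligned up
// to 6M (assuming the allocation granularity is no larger than 2M); the
// whole reservation can then be backed by 2M pages.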
  59 
  60 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
  61                              bool large,
  62                              char* requested_address) {
  63   initialize(size, alignment, large, requested_address, false);
  64 }
  65 
  66 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
  67                              bool large,
  68                              bool executable) {
  69   initialize(size, alignment, large, NULL, executable);
  70 }
  71 
  72 // Helper method.
  73 static bool failed_to_reserve_as_requested(char* base, char* requested_address,
  74                                            const size_t size, bool special)
  75 {
  76   if (base == requested_address || requested_address == NULL)
  77     return false; // did not fail
  78 
  79   if (base != NULL) {
   80     // A different reserve address may be acceptable in other cases,
   81     // but for compressed oops the heap should be at the requested address.
  82     assert(UseCompressedOops, "currently requested address used only for compressed oops");
  83     log_debug(gc, heap, coops)("Reserved memory not at requested address: " PTR_FORMAT " vs " PTR_FORMAT, p2i(base), p2i(requested_address));
  84     // OS ignored requested address. Try different address.
  85     if (special) {
  86       if (!os::release_memory_special(base, size)) {
  87         fatal("os::release_memory_special failed");
  88       }
  89     } else {
  90       if (!os::release_memory(base, size)) {
  91         fatal("os::release_memory failed");
  92       }
  93     }
  94   }
  95   return true;
  96 }
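// Note: this returns true both when nothing was reserved at all (base is
// NULL while an address was requested) and when memory came back at the
// wrong address; in the latter case the mapping has already been released
// above, so a true result means the caller must discard 'base'.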
  97 
  98 void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
  99                                char* requested_address,
 100                                bool executable) {
 101   const size_t granularity = os::vm_allocation_granularity();
 102   assert((size & (granularity - 1)) == 0,
 103          "size not aligned to os::vm_allocation_granularity()");
 104   assert((alignment & (granularity - 1)) == 0,
 105          "alignment not aligned to os::vm_allocation_granularity()");
 106   assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
 107          "not a power of 2");
 108 
 109   alignment = MAX2(alignment, (size_t)os::vm_page_size());
 110 
 111   _base = NULL;
 112   _size = 0;
 113   _special = false;
 114   _executable = executable;
 115   _alignment = 0;
 116   _noaccess_prefix = 0;
 117   if (size == 0) {
 118     return;
 119   }
 120 
  121   // If the OS doesn't support demand paging for large page memory, we need
  122   // to use reserve_memory_special() to reserve and pin the entire region.
 123   bool special = large && !os::can_commit_large_page_memory();
 124   char* base = NULL;
 125 
 126   if (special) {
 127 
 128     base = os::reserve_memory_special(size, alignment, requested_address, executable);
 129 
 130     if (base != NULL) {
 131       if (failed_to_reserve_as_requested(base, requested_address, size, true)) {
 132         // OS ignored requested address. Try different address.
 133         return;
 134       }
 135       // Check alignment constraints.
 136       assert((uintptr_t) base % alignment == 0,
 137              "Large pages returned a non-aligned address, base: "
 138              PTR_FORMAT " alignment: " SIZE_FORMAT_HEX,
 139              p2i(base), alignment);
 140       _special = true;
 141     } else {
 142       // failed; try to reserve regular memory below
 143       if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 144                             !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 145         log_debug(gc, heap, coops)("Reserve regular memory without large pages");
 146       }
 147     }
 148   }
 149 
 150   if (base == NULL) {
  151     // Optimistically assume that the OS returns an aligned base pointer.
  152     // When reserving a large address range, most OSes seem to align to at
  153     // least 64K.
  154 
  155     // If the memory was requested at a particular address, use
  156     // os::attempt_reserve_memory_at() to avoid mapping over something
  157     // important.  If no available space is detected, return NULL.
 158 
 159     if (requested_address != 0) {
 160       base = os::attempt_reserve_memory_at(size, requested_address);
 161       if (failed_to_reserve_as_requested(base, requested_address, size, false)) {
 162         // OS ignored requested address. Try different address.
 163         base = NULL;
 164       }
 165     } else {
 166       base = os::reserve_memory(size, NULL, alignment);
 167     }
 168 
 169     if (base == NULL) return;
 170 
 171     // Check alignment constraints
 172     if ((((size_t)base) & (alignment - 1)) != 0) {
 173       // Base not aligned, retry
 174       if (!os::release_memory(base, size)) fatal("os::release_memory failed");
 175       // Make sure that size is aligned
 176       size = align_up(size, alignment);
 177       base = os::reserve_memory_aligned(size, alignment);
 178 
 179       if (requested_address != 0 &&
 180           failed_to_reserve_as_requested(base, requested_address, size, false)) {
  181       // As a result of the alignment constraints, the allocated base differs
  182       // from the requested address. Return to the caller, who can take
  183       // remedial action (such as trying again without a requested address).
 184         assert(_base == NULL, "should be");
 185         return;
 186       }
 187     }
 188   }
 189   // Done
 190   _base = base;
 191   _size = size;
 192   _alignment = alignment;
 193 }
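// Illustrative use (hypothetical values): a caller might reserve 64M aligned
// to 2M via
//   ReservedSpace rs(64*M, 2*M, /*large*/ false, (char*)NULL);
// and must then check rs.is_reserved(), since initialize() fails silently,
// leaving _base == NULL.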
 194 
 195 
 196 ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
 197                              bool special, bool executable) {
 198   assert((size % os::vm_allocation_granularity()) == 0,
 199          "size not allocation aligned");
 200   _base = base;
 201   _size = size;
 202   _alignment = alignment;
 203   _noaccess_prefix = 0;
 204   _special = special;
 205   _executable = executable;
 206 }
 207 
 208 
 209 ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment,
 210                                         bool split, bool realloc) {
 211   assert(partition_size <= size(), "partition failed");
 212   if (split) {


 235 size_t ReservedSpace::page_align_size_down(size_t size) {
 236   return align_down(size, os::vm_page_size());
 237 }
 238 
 239 
 240 size_t ReservedSpace::allocation_align_size_up(size_t size) {
 241   return align_up(size, os::vm_allocation_granularity());
 242 }
 243 
 244 
 245 size_t ReservedSpace::allocation_align_size_down(size_t size) {
 246   return align_down(size, os::vm_allocation_granularity());
 247 }
 248 
 249 
 250 void ReservedSpace::release() {
 251   if (is_reserved()) {
 252     char *real_base = _base - _noaccess_prefix;
 253     const size_t real_size = _size + _noaccess_prefix;
 254     if (special()) {
 255       os::release_memory_special(real_base, real_size);
  256     } else {
 257       os::release_memory(real_base, real_size);
 258     }
 259     _base = NULL;
 260     _size = 0;
 261     _noaccess_prefix = 0;
 262     _alignment = 0;
 263     _special = false;
 264     _executable = false;
 265   }
 266 }
 267 
 268 static size_t noaccess_prefix_size(size_t alignment) {
 269   return lcm(os::vm_page_size(), alignment);
 270 }
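// Example: with a 4K page size and an 8M heap alignment, lcm(4K, 8M) = 8M,
// so the protected prefix spans whole pages and the heap base placed after
// it keeps the required alignment.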
 271 
 272 void ReservedHeapSpace::establish_noaccess_prefix() {
 273   assert(_alignment >= (size_t)os::vm_page_size(), "must be at least page size big");
 274   _noaccess_prefix = noaccess_prefix_size(_alignment);
 275 


 296   _size -= _noaccess_prefix;
 297   assert(((uintptr_t)_base % _alignment == 0), "must be exactly of required alignment");
 298 }
 299 
  300 // Tries to allocate memory of size 'size' at address requested_address with alignment 'alignment'.
  301 // Does not check whether the reserved memory is actually at requested_address, as the memory returned
  302 // might still fulfill the wishes of the caller.
  303 // Ensures the memory is aligned to 'alignment'.
  304 // NOTE: If ReservedHeapSpace already points to some reserved memory, it is freed first.
 305 void ReservedHeapSpace::try_reserve_heap(size_t size,
 306                                          size_t alignment,
 307                                          bool large,
 308                                          char* requested_address) {
 309   if (_base != NULL) {
 310     // We tried before, but we didn't like the address delivered.
 311     release();
 312   }
 313 
  314   // If the OS doesn't support demand paging for large page memory, we need
  315   // to use reserve_memory_special() to reserve and pin the entire region.
 316   bool special = large && !os::can_commit_large_page_memory();
 317   char* base = NULL;
 318 
 319   log_trace(gc, heap, coops)("Trying to allocate at address " PTR_FORMAT
 320                              " heap of size " SIZE_FORMAT_HEX,
 321                              p2i(requested_address),
 322                              size);
 323 
 324   if (special) {
 325     base = os::reserve_memory_special(size, alignment, requested_address, false);
 326 
 327     if (base != NULL) {
 328       // Check alignment constraints.
 329       assert((uintptr_t) base % alignment == 0,
 330              "Large pages returned a non-aligned address, base: "
 331              PTR_FORMAT " alignment: " SIZE_FORMAT_HEX,
 332              p2i(base), alignment);
 333       _special = true;
 334     }
 335   }
 336 
 337   if (base == NULL) {
 338     // Failed; try to reserve regular memory below
 339     if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 340                           !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 341       log_debug(gc, heap, coops)("Reserve regular memory without large pages");
 342     }
 343 
  344     // Optimistically assume that the OS returns an aligned base pointer.
  345     // When reserving a large address range, most OSes seem to align to at
  346     // least 64K.
  347 
  348     // If the memory was requested at a particular address, use
  349     // os::attempt_reserve_memory_at() to avoid mapping over something
  350     // important.  If no available space is detected, return NULL.
 351 
 352     if (requested_address != 0) {
 353       base = os::attempt_reserve_memory_at(size, requested_address);
 354     } else {
 355       base = os::reserve_memory(size, NULL, alignment);
 356     }
 357   }
 358   if (base == NULL) { return; }
 359 
 360   // Done
 361   _base = base;
 362   _size = size;
 363   _alignment = alignment;
 364 
 365   // Check alignment constraints
 366   if ((((size_t)base) & (alignment - 1)) != 0) {
 367     // Base not aligned, retry.
 368     release();
 369   }
 370 }
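// Note: on a misaligned result the space is released again, leaving _base at
// NULL; callers test _base to decide whether to retry at another attach
// point (see the loops below).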
 371 
 372 void ReservedHeapSpace::try_reserve_range(char *highest_start,
 373                                           char *lowest_start,
 374                                           size_t attach_point_alignment,
 375                                           char *aligned_heap_base_min_address,
 376                                           char *upper_bound,
 377                                           size_t size,
 378                                           size_t alignment,
 379                                           bool large) {
 380   const size_t attach_range = highest_start - lowest_start;
  381   // Cap num_attempts at the number of attempts that are actually possible.
  382   // At least one attempt is possible even for a zero-sized attach range.
 383   const uint64_t num_attempts_possible = (attach_range / attach_point_alignment) + 1;
 384   const uint64_t num_attempts_to_try   = MIN2((uint64_t)HeapSearchSteps, num_attempts_possible);


 539     char** addresses = get_attach_addresses_for_disjoint_mode();
 540     int i = 0;
 541     while (addresses[i] &&                                 // End of array not yet reached.
 542            ((_base == NULL) ||                             // No previous try succeeded.
 543             (_base + size >  (char *)OopEncodingHeapMax && // Not zerobased or unscaled address.
 544              !Universe::is_disjoint_heap_base_address((address)_base)))) {  // Not disjoint address.
 545       char* const attach_point = addresses[i];
 546       assert(attach_point >= aligned_heap_base_min_address, "Flag support broken");
 547       try_reserve_heap(size + noaccess_prefix, alignment, large, attach_point);
 548       i++;
 549     }
 550 
 551     // Last, desperate try without any placement.
 552     if (_base == NULL) {
 553       log_trace(gc, heap, coops)("Trying to allocate at address NULL heap of size " SIZE_FORMAT_HEX, size + noaccess_prefix);
 554       initialize(size + noaccess_prefix, alignment, large, NULL, false);
 555     }
 556   }
 557 }
 558 
 559 ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, bool large) : ReservedSpace() {
 560 
 561   if (size == 0) {
 562     return;
 563   }
 564 
 565   // Heap size should be aligned to alignment, too.
 566   guarantee(is_aligned(size, alignment), "set by caller");
 567 
 568   if (UseCompressedOops) {
 569     initialize_compressed_heap(size, alignment, large);
 570     if (_size > size) {
  571       // We allocated the heap with a noaccess prefix.
  572       // We can end up with a zero-based/unscaled heap with a noaccess prefix
  573       // if we had to try at an arbitrary address.
 574       establish_noaccess_prefix();
 575     }
 576   } else {
 577     initialize(size, alignment, large, NULL, false);
 578   }
 579 
 580   assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
 581          "area must be distinguishable from marks for mark-sweep");
 582   assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
 583          "area must be distinguishable from marks for mark-sweep");
 584 
 585   if (base() != NULL) {
 586     MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
 587   }
 588 }
 589 
  590 // Reserve space for the code segment.  Same as the Java heap, except we
  591 // mark this as executable.
 592 ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
 593                                      size_t rs_align,
 594                                      bool large) :
 595   ReservedSpace(r_size, rs_align, large, /*executable*/ true) {
 596   MemTracker::record_virtual_memory_type((address)base(), mtCode);
 597 }
 598 
 599 // VirtualSpace
 600 
 601 VirtualSpace::VirtualSpace() {
 602   _low_boundary           = NULL;
 603   _high_boundary          = NULL;
 604   _low                    = NULL;
 605   _high                   = NULL;
 606   _lower_high             = NULL;


   1 /*
   2  * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "logging/log.hpp"
  27 #include "memory/resourceArea.hpp"
  28 #include "memory/virtualspace.hpp"
  29 #include "oops/markOop.hpp"
  30 #include "oops/oop.inline.hpp"
  31 #include "services/memTracker.hpp"
  32 #include "utilities/align.hpp"
  33 
  34 // ReservedSpace
  35 
  36 // Dummy constructor
  37 ReservedSpace::ReservedSpace() : _base(NULL), _size(0), _noaccess_prefix(0),
  38     _alignment(0), _special(false), _executable(false), _fd_for_heap(-1) {
  39 }
  40 
  41 ReservedSpace::ReservedSpace(size_t size, size_t preferred_page_size) : _fd_for_heap(-1) {
  42   bool has_preferred_page_size = preferred_page_size != 0;
  43   // Want to use large pages where possible and pad with small pages.
  44   size_t page_size = has_preferred_page_size ? preferred_page_size : os::page_size_for_region_unaligned(size, 1);
  45   bool large_pages = page_size != (size_t)os::vm_page_size();
  46   size_t alignment;
  47   if (large_pages && has_preferred_page_size) {
  48     alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
  49     // ReservedSpace initialization requires size to be aligned to the given
  50     // alignment. Align the size up.
  51     size = align_up(size, alignment);
  52   } else {
  53     // Don't force the alignment to be large page aligned,
  54     // since that will waste memory.
  55     alignment = os::vm_allocation_granularity();
  56   }
  57   initialize(size, alignment, large_pages, NULL, false);
  58 }
  59 
  60 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
  61                              bool large,
  62                              char* requested_address) : _fd_for_heap(-1) {
  63   initialize(size, alignment, large, requested_address, false);
  64 }
  65 
  66 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
  67                              bool large,
  68                              bool executable) : _fd_for_heap(-1) {
  69   initialize(size, alignment, large, NULL, executable);
  70 }
  71 
  72 // Helper method
  73 static void unmap_or_release_memory(char* base, size_t size, bool is_file_mapped) {
  74   if (is_file_mapped) {
  75     if (!os::unmap_memory(base, size)) {
  76       fatal("os::unmap_memory failed");
  77     }
  78   } else if (!os::release_memory(base, size)) {
  79     fatal("os::release_memory failed");
  80   }
  81 }
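// The distinction matters because a file-backed region (AllocateHeapAt) is a
// file mapping and must be torn down with os::unmap_memory(), while an
// anonymous reservation must be torn down with os::release_memory().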
  82 
  83 // Helper method.
  84 static bool failed_to_reserve_as_requested(char* base, char* requested_address,
  85                                            const size_t size, bool special, bool is_file_mapped = false)
  86 {
  87   if (base == requested_address || requested_address == NULL)
  88     return false; // did not fail
  89 
  90   if (base != NULL) {
   91     // A different reserve address may be acceptable in other cases,
   92     // but for compressed oops the heap should be at the requested address.
  93     assert(UseCompressedOops, "currently requested address used only for compressed oops");
  94     log_debug(gc, heap, coops)("Reserved memory not at requested address: " PTR_FORMAT " vs " PTR_FORMAT, p2i(base), p2i(requested_address));
  95     // OS ignored requested address. Try different address.
  96     if (special) {
  97       if (!os::release_memory_special(base, size)) {
  98         fatal("os::release_memory_special failed");
  99       }
 100     } else {
  101       unmap_or_release_memory(base, size, is_file_mapped);
 102     }
 103   }
 104   return true;
 105 }
 106 
 107 void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
 108                                char* requested_address,
 109                                bool executable) {
 110   const size_t granularity = os::vm_allocation_granularity();
 111   assert((size & (granularity - 1)) == 0,
 112          "size not aligned to os::vm_allocation_granularity()");
 113   assert((alignment & (granularity - 1)) == 0,
 114          "alignment not aligned to os::vm_allocation_granularity()");
 115   assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
 116          "not a power of 2");
 117 
 118   alignment = MAX2(alignment, (size_t)os::vm_page_size());
 119 
 120   _base = NULL;
 121   _size = 0;
 122   _special = false;
 123   _executable = executable;
 124   _alignment = 0;
 125   _noaccess_prefix = 0;
 126   if (size == 0) {
 127     return;
 128   }
 129 
  130   // If the OS doesn't support demand paging for large page memory, we need
  131   // to use reserve_memory_special() to reserve and pin the entire region.
  132   // If there is a backing file directory for this space, then whether
  133   // large pages are allocated is up to the filesystem of the backing file,
  134   // so we ignore the UseLargePages flag in this case.
 135   bool special = large && !os::can_commit_large_page_memory();
 136   if (special && _fd_for_heap != -1) {
 137     special = false;
  138     if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
  139                           !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 140       log_debug(gc, heap)("Ignoring UseLargePages since large page support is up to the file system of the backing file for Java heap");
 141     }
 142   }
 143 
 144   char* base = NULL;
 145 
 146   if (special) {
 147 
 148     base = os::reserve_memory_special(size, alignment, requested_address, executable);
 149 
 150     if (base != NULL) {
 151       if (failed_to_reserve_as_requested(base, requested_address, size, true)) {
 152         // OS ignored requested address. Try different address.
 153         return;
 154       }
 155       // Check alignment constraints.
 156       assert((uintptr_t) base % alignment == 0,
 157              "Large pages returned a non-aligned address, base: "
 158              PTR_FORMAT " alignment: " SIZE_FORMAT_HEX,
 159              p2i(base), alignment);
 160       _special = true;
 161     } else {
 162       // failed; try to reserve regular memory below
 163       if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 164                             !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 165         log_debug(gc, heap, coops)("Reserve regular memory without large pages");
 166       }
 167     }
 168   }
 169 
 170   if (base == NULL) {
  171     // Optimistically assume that the OS returns an aligned base pointer.
  172     // When reserving a large address range, most OSes seem to align to at
  173     // least 64K.
  174 
  175     // If the memory was requested at a particular address, use
  176     // os::attempt_reserve_memory_at() to avoid mapping over something
  177     // important.  If no available space is detected, return NULL.
 178 
 179     if (requested_address != 0) {
 180       base = os::attempt_reserve_memory_at(size, requested_address, _fd_for_heap);
 181       if (failed_to_reserve_as_requested(base, requested_address, size, false, _fd_for_heap != -1)) {
 182         // OS ignored requested address. Try different address.
 183         base = NULL;
 184       }
 185     } else {
 186       base = os::reserve_memory(size, NULL, alignment, _fd_for_heap);
 187     }
 188 
 189     if (base == NULL) return;
 190 
 191     // Check alignment constraints
 192     if ((((size_t)base) & (alignment - 1)) != 0) {
 193       // Base not aligned, retry
 194       unmap_or_release_memory(base, size, _fd_for_heap != -1 /*is_file_mapped*/);
 195 
 196       // Make sure that size is aligned
 197       size = align_up(size, alignment);
 198       base = os::reserve_memory_aligned(size, alignment, _fd_for_heap);
 199 
 200       if (requested_address != 0 &&
 201           failed_to_reserve_as_requested(base, requested_address, size, false, _fd_for_heap != -1)) {
  202         // As a result of the alignment constraints, the allocated base differs
  203         // from the requested address. Return to the caller, who can take
  204         // remedial action (such as trying again without a requested address).
 205         assert(_base == NULL, "should be");
 206         return;
 207       }
 208     }
 209   }
 210   // Done
 211   _base = base;
 212   _size = size;
 213   _alignment = alignment;
  214   // If the heap is reserved with a backing file, the entire space has been committed, so set the _special flag to true.
 215   if (_fd_for_heap != -1) {
 216     _special = true;
 217   }
 218 }
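// Since file-backed space is committed up front, release() must later take
// the _special path and unmap rather than release; see the matching
// _fd_for_heap check in ReservedSpace::release() below.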
 219 
 220 
 221 ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
 222                              bool special, bool executable) {
 223   assert((size % os::vm_allocation_granularity()) == 0,
 224          "size not allocation aligned");
 225   _base = base;
 226   _size = size;
 227   _alignment = alignment;
 228   _noaccess_prefix = 0;
 229   _special = special;
 230   _executable = executable;
 231 }
 232 
 233 
 234 ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment,
 235                                         bool split, bool realloc) {
 236   assert(partition_size <= size(), "partition failed");
 237   if (split) {


 260 size_t ReservedSpace::page_align_size_down(size_t size) {
 261   return align_down(size, os::vm_page_size());
 262 }
 263 
 264 
 265 size_t ReservedSpace::allocation_align_size_up(size_t size) {
 266   return align_up(size, os::vm_allocation_granularity());
 267 }
 268 
 269 
 270 size_t ReservedSpace::allocation_align_size_down(size_t size) {
 271   return align_down(size, os::vm_allocation_granularity());
 272 }
 273 
 274 
 275 void ReservedSpace::release() {
 276   if (is_reserved()) {
 277     char *real_base = _base - _noaccess_prefix;
 278     const size_t real_size = _size + _noaccess_prefix;
 279     if (special()) {
 280       if (_fd_for_heap != -1) {
 281         os::unmap_memory(real_base, real_size);
 282       } else {
 283         os::release_memory_special(real_base, real_size);
 284       }
  285     } else {
 286       os::release_memory(real_base, real_size);
 287     }
 288     _base = NULL;
 289     _size = 0;
 290     _noaccess_prefix = 0;
 291     _alignment = 0;
 292     _special = false;
 293     _executable = false;
 294   }
 295 }
 296 
 297 static size_t noaccess_prefix_size(size_t alignment) {
 298   return lcm(os::vm_page_size(), alignment);
 299 }
 300 
 301 void ReservedHeapSpace::establish_noaccess_prefix() {
 302   assert(_alignment >= (size_t)os::vm_page_size(), "must be at least page size big");
 303   _noaccess_prefix = noaccess_prefix_size(_alignment);
 304 


 325   _size -= _noaccess_prefix;
 326   assert(((uintptr_t)_base % _alignment == 0), "must be exactly of required alignment");
 327 }
 328 
  329 // Tries to allocate memory of size 'size' at address requested_address with alignment 'alignment'.
  330 // Does not check whether the reserved memory is actually at requested_address, as the memory returned
  331 // might still fulfill the wishes of the caller.
  332 // Ensures the memory is aligned to 'alignment'.
  333 // NOTE: If ReservedHeapSpace already points to some reserved memory, it is freed first.
 334 void ReservedHeapSpace::try_reserve_heap(size_t size,
 335                                          size_t alignment,
 336                                          bool large,
 337                                          char* requested_address) {
 338   if (_base != NULL) {
 339     // We tried before, but we didn't like the address delivered.
 340     release();
 341   }
 342 
  343   // If the OS doesn't support demand paging for large page memory, we need
  344   // to use reserve_memory_special() to reserve and pin the entire region.
  345   // If there is a backing file directory for this space, then whether
  346   // large pages are allocated is up to the filesystem of the backing file,
  347   // so we ignore the UseLargePages flag in this case.
 348   bool special = large && !os::can_commit_large_page_memory();
 349   if (special && _fd_for_heap != -1) {
 350     special = false;
 351     if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 352                           !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 353       log_debug(gc, heap)("Cannot allocate large pages for Java Heap when AllocateHeapAt option is set.");
 354     }
 355   }
 356   char* base = NULL;
 357 
 358   log_trace(gc, heap, coops)("Trying to allocate at address " PTR_FORMAT
 359                              " heap of size " SIZE_FORMAT_HEX,
 360                              p2i(requested_address),
 361                              size);
 362 
 363   if (special) {
 364     base = os::reserve_memory_special(size, alignment, requested_address, false);
 365 
 366     if (base != NULL) {
 367       // Check alignment constraints.
 368       assert((uintptr_t) base % alignment == 0,
 369              "Large pages returned a non-aligned address, base: "
 370              PTR_FORMAT " alignment: " SIZE_FORMAT_HEX,
 371              p2i(base), alignment);
 372       _special = true;
 373     }
 374   }
 375 
 376   if (base == NULL) {
 377     // Failed; try to reserve regular memory below
 378     if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 379                           !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 380       log_debug(gc, heap, coops)("Reserve regular memory without large pages");
 381     }
 382 
  383     // Optimistically assume that the OS returns an aligned base pointer.
  384     // When reserving a large address range, most OSes seem to align to at
  385     // least 64K.
  386 
  387     // If the memory was requested at a particular address, use
  388     // os::attempt_reserve_memory_at() to avoid mapping over something
  389     // important.  If no available space is detected, return NULL.
 390 
 391     if (requested_address != 0) {
 392       base = os::attempt_reserve_memory_at(size, requested_address, _fd_for_heap);
 393     } else {
 394       base = os::reserve_memory(size, NULL, alignment, _fd_for_heap);
 395     }
 396   }
 397   if (base == NULL) { return; }
 398 
 399   // Done
 400   _base = base;
 401   _size = size;
 402   _alignment = alignment;
 403 
  404   // If the heap is reserved with a backing file, the entire space has been committed, so set the _special flag to true.
 405   if (_fd_for_heap != -1) {
 406     _special = true;
 407   }
 408 
 409   // Check alignment constraints
 410   if ((((size_t)base) & (alignment - 1)) != 0) {
 411     // Base not aligned, retry.
 412     release();
 413   }
 414 }
 415 
 416 void ReservedHeapSpace::try_reserve_range(char *highest_start,
 417                                           char *lowest_start,
 418                                           size_t attach_point_alignment,
 419                                           char *aligned_heap_base_min_address,
 420                                           char *upper_bound,
 421                                           size_t size,
 422                                           size_t alignment,
 423                                           bool large) {
 424   const size_t attach_range = highest_start - lowest_start;
  425   // Cap num_attempts at the number of attempts that are actually possible.
  426   // At least one attempt is possible even for a zero-sized attach range.
 427   const uint64_t num_attempts_possible = (attach_range / attach_point_alignment) + 1;
 428   const uint64_t num_attempts_to_try   = MIN2((uint64_t)HeapSearchSteps, num_attempts_possible);


 583     char** addresses = get_attach_addresses_for_disjoint_mode();
 584     int i = 0;
 585     while (addresses[i] &&                                 // End of array not yet reached.
 586            ((_base == NULL) ||                             // No previous try succeeded.
 587             (_base + size >  (char *)OopEncodingHeapMax && // Not zerobased or unscaled address.
 588              !Universe::is_disjoint_heap_base_address((address)_base)))) {  // Not disjoint address.
 589       char* const attach_point = addresses[i];
 590       assert(attach_point >= aligned_heap_base_min_address, "Flag support broken");
 591       try_reserve_heap(size + noaccess_prefix, alignment, large, attach_point);
 592       i++;
 593     }
 594 
 595     // Last, desperate try without any placement.
 596     if (_base == NULL) {
 597       log_trace(gc, heap, coops)("Trying to allocate at address NULL heap of size " SIZE_FORMAT_HEX, size + noaccess_prefix);
 598       initialize(size + noaccess_prefix, alignment, large, NULL, false);
 599     }
 600   }
 601 }
 602 
 603 ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, bool large, const char* heap_allocation_directory) : ReservedSpace() {
 604 
 605   if (size == 0) {
 606     return;
 607   }
 608 
 609   if (heap_allocation_directory != NULL) {
 610     _fd_for_heap = os::create_file_for_heap(heap_allocation_directory);
 611     if (_fd_for_heap == -1) {
 612       vm_exit_during_initialization(
 613         err_msg("Could not create file for Heap at location %s", heap_allocation_directory));
 614     }
 615   }
 616 
 617   // Heap size should be aligned to alignment, too.
 618   guarantee(is_aligned(size, alignment), "set by caller");
 619 
 620   if (UseCompressedOops) {
 621     initialize_compressed_heap(size, alignment, large);
 622     if (_size > size) {
  623       // We allocated the heap with a noaccess prefix.
  624       // We can end up with a zero-based/unscaled heap with a noaccess prefix
  625       // if we had to try at an arbitrary address.
 626       establish_noaccess_prefix();
 627     }
 628   } else {
 629     initialize(size, alignment, large, NULL, false);
 630   }
 631 
 632   assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
 633          "area must be distinguishable from marks for mark-sweep");
 634   assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
 635          "area must be distinguishable from marks for mark-sweep");
 636 
 637   if (base() != NULL) {
 638     MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
 639   }
 640 
 641   if (_fd_for_heap != -1) {
 642     os::close(_fd_for_heap);
 643   }
 644 }
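// Closing the descriptor here is safe: on POSIX systems an established
// mapping keeps the backing file alive after its file descriptor is closed
// (the platform os code is assumed to provide the same guarantee elsewhere).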
 645 
  646 // Reserve space for the code segment.  Same as the Java heap, except we
  647 // mark this as executable.
 648 ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
 649                                      size_t rs_align,
 650                                      bool large) :
 651   ReservedSpace(r_size, rs_align, large, /*executable*/ true) {
 652   MemTracker::record_virtual_memory_type((address)base(), mtCode);
 653 }
 654 
 655 // VirtualSpace
 656 
 657 VirtualSpace::VirtualSpace() {
 658   _low_boundary           = NULL;
 659   _high_boundary          = NULL;
 660   _low                    = NULL;
 661   _high                   = NULL;
 662   _lower_high             = NULL;