< prev index next >

src/hotspot/share/memory/virtualspace.cpp

Print this page




 196 
 197       // Make sure that size is aligned
 198       size = align_up(size, alignment);
 199       base = os::reserve_memory_aligned(size, alignment, _fd_for_heap);
 200 
 201       if (requested_address != 0 &&
 202           failed_to_reserve_as_requested(base, requested_address, size, false, _fd_for_heap != -1)) {
 203         // As a result of the alignment constraints, the allocated base differs
 204         // from the requested address. Return back to the caller who can
 205         // take remedial action (like try again without a requested address).
 206         assert(_base == NULL, "should be");
 207         return;
 208       }
 209     }
 210   }
 211   // Done
 212   _base = base;
 213   _size = size;
 214   _alignment = alignment;
 215   // If heap is reserved with a backing file, the entire space has been committed. So set the _special flag to true
 216   if (_fd_for_heap != -1) {
 217     _special = true;
 218   }
 219 }
 220 
 221 
 // Wraps an already-reserved region: records base/size/alignment and the
 // special/executable flags verbatim. No memory is reserved or committed here.
 222 ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
 223                              bool special, bool executable) {
       // Size must be a multiple of the OS allocation granularity so later
       // operations on this space remain legal at the OS level.
 224   assert((size % os::vm_allocation_granularity()) == 0,
 225          "size not allocation aligned");
 226   _base = base;
 227   _size = size;
 228   _alignment = alignment;
       // A plain wrapped space never carries a protection page below its base.
 229   _noaccess_prefix = 0;
       // 'special' indicates the region is already committed (e.g. file-backed
       // reservation, as set elsewhere in this file when _fd_for_heap != -1).
 230   _special = special;
 231   _executable = executable;
 232 }
 233 
 234 
 235 ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment,
 236                                         bool split, bool realloc) {


 304   _noaccess_prefix = noaccess_prefix_size(_alignment);
 305 
 306   if (base() && base() + _size > (char *)OopEncodingHeapMax) {
 307     if (true
 308         WIN64_ONLY(&& !UseLargePages)
 309         AIX_ONLY(&& os::vm_page_size() != 64*K)) {
 310       // Protect memory at the base of the allocated region.
 311       // If special, the page was committed (only matters on windows)
 312       if (!os::protect_memory(_base, _noaccess_prefix, os::MEM_PROT_NONE, _special)) {
 313         fatal("cannot protect protection page");
 314       }
 315       log_debug(gc, heap, coops)("Protected page at the reserved heap base: "
 316                                  PTR_FORMAT " / " INTX_FORMAT " bytes",
 317                                  p2i(_base),
 318                                  _noaccess_prefix);
 319       assert(Universe::narrow_oop_use_implicit_null_checks() == true, "not initialized?");
 320     } else {
 321       Universe::set_narrow_oop_use_implicit_null_checks(false);
 322     }
 323   }
 324 
 325   _base += _noaccess_prefix;
 326   _size -= _noaccess_prefix;
 327   assert(((uintptr_t)_base % _alignment == 0), "must be exactly of required alignment");
 328 }
 329 
 330 // Tries to allocate memory of size 'size' at address requested_address with alignment 'alignment'.
 331 // Does not check whether the reserved memory actually is at requested_address, as the memory returned
 332 // might still fulfill the wishes of the caller.
 333 // Assures the memory is aligned to 'alignment'.
 334 // NOTE: If ReservedHeapSpace already points to some reserved memory this is freed, first.
 335 void ReservedHeapSpace::try_reserve_heap(size_t size,
 336                                          size_t alignment,
 337                                          bool large,
 338                                          char* requested_address) {
 339   if (_base != NULL) {
 340     // We tried before, but we didn't like the address delivered.
 341     release();
 342   }
 343 
 344   // If OS doesn't support demand paging for large page memory, we need


 386     // least 64K.
 387 
 388     // If the memory was requested at a particular address, use
 389     // os::attempt_reserve_memory_at() to avoid over mapping something
 390     // important.  If available space is not detected, return NULL.
 391 
 392     if (requested_address != 0) {
 393       base = os::attempt_reserve_memory_at(size, requested_address, _fd_for_heap);
 394     } else {
 395       base = os::reserve_memory(size, NULL, alignment, _fd_for_heap);
 396     }
 397   }
 398   if (base == NULL) { return; }
 399 
 400   // Done
 401   _base = base;
 402   _size = size;
 403   _alignment = alignment;
 404 
 405   // If heap is reserved with a backing file, the entire space has been committed. So set the _special flag to true
 406   if (_fd_for_heap != -1) {
 407     _special = true;
 408   }
 409 
 410   // Check alignment constraints
 411   if ((((size_t)base) & (alignment - 1)) != 0) {
 412     // Base not aligned, retry.
 413     release();
 414   }
 415 }
 416 
 417 void ReservedHeapSpace::try_reserve_range(char *highest_start,
 418                                           char *lowest_start,
 419                                           size_t attach_point_alignment,
 420                                           char *aligned_heap_base_min_address,
 421                                           char *upper_bound,
 422                                           size_t size,
 423                                           size_t alignment,
 424                                           bool large) {
 425   const size_t attach_range = highest_start - lowest_start;
 426   // Cap num_attempts at possible number.


 590       char* const attach_point = addresses[i];
 591       assert(attach_point >= aligned_heap_base_min_address, "Flag support broken");
 592       try_reserve_heap(size + noaccess_prefix, alignment, large, attach_point);
 593       i++;
 594     }
 595 
 596     // Last, desperate try without any placement.
 597     if (_base == NULL) {
 598       log_trace(gc, heap, coops)("Trying to allocate at address NULL heap of size " SIZE_FORMAT_HEX, size + noaccess_prefix);
 599       initialize(size + noaccess_prefix, alignment, large, NULL, false);
 600     }
 601   }
 602 }
 603 
 // Reserves the Java heap. If 'heap_allocation_directory' is non-NULL the heap
 // is backed by a file created in that directory (AllocateHeapAt support);
 // otherwise it is anonymous memory. With compressed oops enabled, tries to
 // place the heap favourably for oop compression and may add a no-access
 // protection prefix below the heap base.
 604 ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, bool large, const char* heap_allocation_directory) : ReservedSpace() {
 605 
       // A zero-sized request leaves the space empty (default-constructed state).
 606   if (size == 0) {
 607     return;
 608   }
 609 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
       // Create the backing file for a file-backed heap; a failure to create it
       // is fatal during VM initialization.
 610   if (heap_allocation_directory != NULL) {
 611     _fd_for_heap = os::create_file_for_heap(heap_allocation_directory);
 612     if (_fd_for_heap == -1) {
 613       vm_exit_during_initialization(
 614         err_msg("Could not create file for Heap at location %s", heap_allocation_directory));
 615     }
 616   }
 617 
 618   // Heap size should be aligned to alignment, too.
 619   guarantee(is_aligned(size, alignment), "set by caller");
 620 
 621   if (UseCompressedOops) {
 622     initialize_compressed_heap(size, alignment, large);
         // _size > size means the reservation includes a noaccess prefix below
         // the usable heap; carve it out and protect it.
 623     if (_size > size) {
 624       // We allocated heap with noaccess prefix.
 625       // It can happen we get a zerobased/unscaled heap with noaccess prefix,
 626       // if we had to try at arbitrary address.
 627       establish_noaccess_prefix();
 628     }
 629   } else {
 630     initialize(size, alignment, large, NULL, false);
 631   }
 632 
       // Both ends of the heap must survive a round-trip through the mark-word
       // pointer encoding used by mark-sweep.
 633   assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
 634          "area must be distinguishable from marks for mark-sweep");
 635   assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
 636          "area must be distinguishable from marks for mark-sweep");
 637 
       // Tag the reservation for native memory tracking (only if it succeeded).
 638   if (base() != NULL) {
 639     MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
 
 
 
 640   }
 641 
       // The fd is only needed to establish the mapping; the mapping itself
       // survives the close.
 642   if (_fd_for_heap != -1) {
 643     os::close(_fd_for_heap);
 644   }
 645 }
 646 
 647 // Reserve space for code segment.  Same as Java heap only we mark this as
 648 // executable.
 649 ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
 650                                      size_t rs_align,
 651                                      bool large) :
 652   ReservedSpace(r_size, rs_align, large, /*executable*/ true) {
       // Tag the reservation as code memory for native memory tracking.
 653   MemTracker::record_virtual_memory_type((address)base(), mtCode);
 654 }
 655 
 656 // VirtualSpace
 657 
 658 VirtualSpace::VirtualSpace() {
 659   _low_boundary           = NULL;
 660   _high_boundary          = NULL;
 661   _low                    = NULL;
 662   _high                   = NULL;




 196 
 197       // Make sure that size is aligned
 198       size = align_up(size, alignment);
 199       base = os::reserve_memory_aligned(size, alignment, _fd_for_heap);
 200 
 201       if (requested_address != 0 &&
 202           failed_to_reserve_as_requested(base, requested_address, size, false, _fd_for_heap != -1)) {
 203         // As a result of the alignment constraints, the allocated base differs
 204         // from the requested address. Return back to the caller who can
 205         // take remedial action (like try again without a requested address).
 206         assert(_base == NULL, "should be");
 207         return;
 208       }
 209     }
 210   }
 211   // Done
 212   _base = base;
 213   _size = size;
 214   _alignment = alignment;
 215   // If heap is reserved with a backing file, the entire space has been committed. So set the _special flag to true
 216   if (_fd_for_heap != -1 && AllocateOldGenAt == NULL) {
 217     _special = true;
 218   }
 219 }
 220 
 221 
 // Wraps an already-reserved region: records base/size/alignment and the
 // special/executable flags verbatim. No memory is reserved or committed here.
 222 ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
 223                              bool special, bool executable) {
       // Size must be a multiple of the OS allocation granularity so later
       // operations on this space remain legal at the OS level.
 224   assert((size % os::vm_allocation_granularity()) == 0,
 225          "size not allocation aligned");
 226   _base = base;
 227   _size = size;
 228   _alignment = alignment;
       // A plain wrapped space never carries a protection page below its base.
 229   _noaccess_prefix = 0;
       // 'special' indicates the region is already committed (e.g. file-backed
       // reservation, as set elsewhere in this file when _fd_for_heap != -1).
 230   _special = special;
 231   _executable = executable;
 232 }
 233 
 234 
 235 ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment,
 236                                         bool split, bool realloc) {


 304   _noaccess_prefix = noaccess_prefix_size(_alignment);
 305 
 306   if (base() && base() + _size > (char *)OopEncodingHeapMax) {
 307     if (true
 308         WIN64_ONLY(&& !UseLargePages)
 309         AIX_ONLY(&& os::vm_page_size() != 64*K)) {
 310       // Protect memory at the base of the allocated region.
 311       // If special, the page was committed (only matters on windows)
 312       if (!os::protect_memory(_base, _noaccess_prefix, os::MEM_PROT_NONE, _special)) {
 313         fatal("cannot protect protection page");
 314       }
 315       log_debug(gc, heap, coops)("Protected page at the reserved heap base: "
 316                                  PTR_FORMAT " / " INTX_FORMAT " bytes",
 317                                  p2i(_base),
 318                                  _noaccess_prefix);
 319       assert(Universe::narrow_oop_use_implicit_null_checks() == true, "not initialized?");
 320     } else {
 321       Universe::set_narrow_oop_use_implicit_null_checks(false);
 322     }
 323   }

 324   _base += _noaccess_prefix;
 325   _size -= _noaccess_prefix;
 326   assert(((uintptr_t)_base % _alignment == 0), "must be exactly of required alignment");
 327 }
 328 
 329 // Tries to allocate memory of size 'size' at address requested_address with alignment 'alignment'.
 330 // Does not check whether the reserved memory actually is at requested_address, as the memory returned
 331 // might still fulfill the wishes of the caller.
 332 // Assures the memory is aligned to 'alignment'.
 333 // NOTE: If ReservedHeapSpace already points to some reserved memory this is freed, first.
 334 void ReservedHeapSpace::try_reserve_heap(size_t size,
 335                                          size_t alignment,
 336                                          bool large,
 337                                          char* requested_address) {
 338   if (_base != NULL) {
 339     // We tried before, but we didn't like the address delivered.
 340     release();
 341   }
 342 
 343   // If OS doesn't support demand paging for large page memory, we need


 385     // least 64K.
 386 
 387     // If the memory was requested at a particular address, use
 388     // os::attempt_reserve_memory_at() to avoid over mapping something
 389     // important.  If available space is not detected, return NULL.
 390 
 391     if (requested_address != 0) {
 392       base = os::attempt_reserve_memory_at(size, requested_address, _fd_for_heap);
 393     } else {
 394       base = os::reserve_memory(size, NULL, alignment, _fd_for_heap);
 395     }
 396   }
 397   if (base == NULL) { return; }
 398 
 399   // Done
 400   _base = base;
 401   _size = size;
 402   _alignment = alignment;
 403 
 404   // If heap is reserved with a backing file, the entire space has been committed. So set the _special flag to true
 405   if (_fd_for_heap != -1 && AllocateOldGenAt == NULL) {
 406     _special = true;
 407   }
 408 
 409   // Check alignment constraints
 410   if ((((size_t)base) & (alignment - 1)) != 0) {
 411     // Base not aligned, retry.
 412     release();
 413   }
 414 }
 415 
 416 void ReservedHeapSpace::try_reserve_range(char *highest_start,
 417                                           char *lowest_start,
 418                                           size_t attach_point_alignment,
 419                                           char *aligned_heap_base_min_address,
 420                                           char *upper_bound,
 421                                           size_t size,
 422                                           size_t alignment,
 423                                           bool large) {
 424   const size_t attach_range = highest_start - lowest_start;
 425   // Cap num_attempts at possible number.


 589       char* const attach_point = addresses[i];
 590       assert(attach_point >= aligned_heap_base_min_address, "Flag support broken");
 591       try_reserve_heap(size + noaccess_prefix, alignment, large, attach_point);
 592       i++;
 593     }
 594 
 595     // Last, desperate try without any placement.
 596     if (_base == NULL) {
 597       log_trace(gc, heap, coops)("Trying to allocate at address NULL heap of size " SIZE_FORMAT_HEX, size + noaccess_prefix);
 598       initialize(size + noaccess_prefix, alignment, large, NULL, false);
 599     }
 600   }
 601 }
 602 
 // Reserves the Java heap. Two file-backed modes exist: AllocateOldGenAt backs
 // the heap with a file on an NVDIMM device (fd stays registered and open), and
 // 'heap_allocation_directory' (AllocateHeapAt) backs it with a file in a given
 // directory (fd closed after mapping). With compressed oops enabled, tries to
 // place the heap favourably and may add a no-access prefix below the base.
 603 ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, bool large, const char* heap_allocation_directory) : ReservedSpace() {
 604 
       // A zero-sized request leaves the space empty (default-constructed state).
 605   if (size == 0) {
 606     return;
 607   }
 608 
 609   // Open AllocateOldGenAt file
 610   if (AllocateOldGenAt != NULL) {
 611     _fd_for_heap = os::create_file_for_heap(AllocateOldGenAt);
 612     if (_fd_for_heap== -1) {
 613       vm_exit_during_initialization(
 614         err_msg("Could not create file for Heap at location %s", AllocateOldGenAt));
 615     }
 616     // Allocate space on device.
 617     os::allocate_file(_fd_for_heap, MaxHeapSize);
         // Register the fd/flag globally so later old-gen mapping can reuse it.
 618     os::set_nvdimm_fd(_fd_for_heap);
 619     os::set_nvdimm_present(true);
 620   } else {
 621     _fd_for_heap = -1;
 622   }
 623 
       // NOTE(review): if both AllocateOldGenAt and heap_allocation_directory are
       // set, the fd opened above (and registered via os::set_nvdimm_fd) is
       // overwritten here without being closed -- fd leak plus a stale nvdimm fd.
       // Confirm the two flags are mutually exclusive or close the old fd first.
 624   if (heap_allocation_directory != NULL) {
 625     _fd_for_heap = os::create_file_for_heap(heap_allocation_directory);
 626     if (_fd_for_heap == -1) {
 627       vm_exit_during_initialization(
 628         err_msg("Could not create file for Heap at location %s", heap_allocation_directory));
 629     }
 630   }
 631 
 632   // Heap size should be aligned to alignment, too.
 633   guarantee(is_aligned(size, alignment), "set by caller");
 634 
 635   if (UseCompressedOops) {
 636     initialize_compressed_heap(size, alignment, large);
         // _size > size means the reservation includes a noaccess prefix below
         // the usable heap; carve it out and protect it.
 637     if (_size > size) {
 638       // We allocated heap with noaccess prefix.
 639       // It can happen we get a zerobased/unscaled heap with noaccess prefix,
 640       // if we had to try at arbitrary address.
 641       establish_noaccess_prefix();
 642     }
 643   } else {
 644     initialize(size, alignment, large, NULL, false);
 645   }
 646 
       // Both ends of the heap must survive a round-trip through the mark-word
       // pointer encoding used by mark-sweep.
 647   assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
 648          "area must be distinguishable from marks for mark-sweep");
 649   assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
 650          "area must be distinguishable from marks for mark-sweep");
 651 
       // Tag the reservation for native memory tracking (only if it succeeded).
 652   if (base() != NULL) {
 653     MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
         // Publish the heap base for NVDIMM-backed old gen.
 654     if (AllocateOldGenAt != NULL && _fd_for_heap != -1) {
 655       os::set_nvdimm_heapbase((address)_base);
 656     }
 657   }
 658 
       // Close the AllocateHeapAt fd (mapping survives the close); the
       // AllocateOldGenAt fd is deliberately kept open since it was registered
       // via os::set_nvdimm_fd for later use.
 659   if (_fd_for_heap != -1 && AllocateOldGenAt == NULL) {
 660     os::close(_fd_for_heap);
 661   }
 662 }
 663 
 664 // Reserve space for code segment.  Same as Java heap only we mark this as
 665 // executable.
 666 ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
 667                                      size_t rs_align,
 668                                      bool large) :
 669   ReservedSpace(r_size, rs_align, large, /*executable*/ true) {
       // Tag the reservation as code memory for native memory tracking.
 670   MemTracker::record_virtual_memory_type((address)base(), mtCode);
 671 }
 672 
 673 // VirtualSpace
 674 
 675 VirtualSpace::VirtualSpace() {
 676   _low_boundary           = NULL;
 677   _high_boundary          = NULL;
 678   _low                    = NULL;
 679   _high                   = NULL;


< prev index next >