src/share/vm/memory/metaspace.cpp

// bytes is the size of the associated virtual space.
VirtualSpaceNode::VirtualSpaceNode(size_t bytes) : _top(NULL), _next(NULL), _rs(), _container_count(0) {
  assert_is_size_aligned(bytes, Metaspace::reserve_alignment());

#if INCLUDE_CDS
  // This allocates memory with mmap. For DumpSharedSpaces, try to reserve
  // at a configurable address, generally at the top of the Java heap, so
  // other memory addresses don't conflict.
  if (DumpSharedSpaces) {
    bool large_pages = false; // No large pages when dumping the CDS archive.
    char* shared_base = (char*)align_ptr_up((char*)SharedBaseAddress, Metaspace::reserve_alignment());

    _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages, shared_base);
    if (_rs.is_reserved()) {
      assert(shared_base == 0 || _rs.base() == shared_base, "should match");
    } else {
      // Get an mmap region anywhere if reserving at SharedBaseAddress fails.
      _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
    }
    if (!_rs.is_reserved()) {
      vm_exit_during_initialization("Unable to allocate memory for shared space",
        err_msg(SIZE_FORMAT " bytes.", bytes));
    }
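    // Hand the reserved region over to the CDS dumping machinery.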
    MetaspaceShared::initialize_shared_rs(&_rs);
  } else
#endif
  {
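    // Normal (non-dump) case: let the large-page heuristic decide whether
    // this reservation should be backed by large pages.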
    bool large_pages = should_commit_large_pages_when_reserving(bytes);

    _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
  }

  if (_rs.is_reserved()) {
    assert(_rs.base() != NULL, "Catch if we get a NULL address");
    assert(_rs.size() != 0, "Catch if we get a 0 size");
    assert_is_ptr_aligned(_rs.base(), Metaspace::reserve_alignment());
    assert_is_size_aligned(_rs.size(), Metaspace::reserve_alignment());

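    // Tag the reserved range for Native Memory Tracking as class metadata.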
    MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass);
  }
}
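
For reference, align_ptr_up above rounds SharedBaseAddress up to the
reservation alignment. Assuming a power-of-two alignment (reservation
granularities are), the arithmetic is the usual round-up; the helper below
is a hypothetical stand-in for illustration, not HotSpot's implementation:

#include <assert.h>
#include <stdint.h>

// Round p up to the next multiple of alignment, where alignment is a
// power of two. Hypothetical sketch; HotSpot's align_ptr_up may differ.
static inline char* align_up_sketch(char* p, uintptr_t alignment) {
  assert((alignment & (alignment - 1)) == 0 && "power-of-two alignment");
  uintptr_t v = (uintptr_t)p;
  return (char*)((v + alignment - 1) & ~(alignment - 1));
}
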
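The constructor's overall shape is: try to reserve address space at the
preferred base, fall back to an arbitrary address, and abort VM
initialization if neither reservation succeeds. A minimal sketch of that
pattern, written against POSIX/Linux mmap rather than HotSpot's
ReservedSpace (reserve_with_fallback is illustrative, not part of
metaspace.cpp):

#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>

// Reserve bytes of address space, preferring the given base address.
// Mirrors the structure above: preferred attempt, anonymous fallback,
// then hard failure (the analogue of vm_exit_during_initialization).
static void* reserve_with_fallback(void* preferred, size_t bytes) {
  void* p = mmap(preferred, bytes, PROT_NONE,
                 MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
  if (p != MAP_FAILED && preferred != NULL && p != preferred) {
    // mmap treats a non-fixed address only as a hint; if the mapping
    // landed elsewhere, discard it and accept any address instead.
    munmap(p, bytes);
    p = mmap(NULL, bytes, PROT_NONE,
             MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
  }
  if (p == MAP_FAILED) {
    fprintf(stderr, "Unable to reserve %zu bytes.\n", bytes);
    exit(1);
  }
  return p;
}

Unlike ReservedSpace with a requested address, mmap without MAP_FIXED may
succeed at a different address, so the sketch checks the returned pointer
explicitly.
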
void VirtualSpaceNode::purge(ChunkManager* chunk_manager) {

