
src/share/vm/memory/metaspace.cpp

rev 7602 : 8064457: Introduce compressed oops mode disjoint base and improve compressed heap handling.


 405         MetaspaceGC::allowed_expansion() >= words) {
 406       return true;
 407     }
 408   }
 409 
 410   return false;
 411 }
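
The fragment above ends an expansion check: expansion is permitted only when the requested word count fits within MetaspaceGC::allowed_expansion(). A minimal standalone sketch of that kind of capacity test (the function and parameter names below are assumptions for illustration, not the HotSpot API):

    #include <cstddef>

    // Hypothetical sketch: allow an expansion only if the request (in words)
    // fits within the remaining allowance (also in words).
    static bool expansion_allowed_sketch(size_t allowed_expansion_words,
                                         size_t requested_words) {
      return allowed_expansion_words >= requested_words;
    }
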
 412 
 413 // bytes is the size of the associated virtual space.
 414 VirtualSpaceNode::VirtualSpaceNode(size_t bytes) : _top(NULL), _next(NULL), _rs(), _container_count(0) {
 415   assert_is_size_aligned(bytes, Metaspace::reserve_alignment());
 416 
 417 #if INCLUDE_CDS
 418   // This allocates memory with mmap. For DumpSharedSpaces, try to reserve
 419   // at a configurable address, generally at the top of the Java heap, so
 420   // other memory addresses don't conflict.
 421   if (DumpSharedSpaces) {
 422     bool large_pages = false; // No large pages when dumping the CDS archive.
 423     char* shared_base = (char*)align_ptr_up((char*)SharedBaseAddress, Metaspace::reserve_alignment());
 424 
 425     _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages, shared_base, 0);
 426     if (_rs.is_reserved()) {
 427       assert(shared_base == 0 || _rs.base() == shared_base, "should match");
 428     } else {
 429     // Get an mmap region anywhere if reserving at SharedBaseAddress fails.
 430       _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
 431     }
 432     MetaspaceShared::set_shared_rs(&_rs);
 433   } else
 434 #endif
 435   {
 436     bool large_pages = should_commit_large_pages_when_reserving(bytes);
 437 
 438     _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
 439   }
 440 
 441   if (_rs.is_reserved()) {
 442     assert(_rs.base() != NULL, "Catch if we get a NULL address");
 443     assert(_rs.size() != 0, "Catch if we get a 0 size");
 444     assert_is_ptr_aligned(_rs.base(), Metaspace::reserve_alignment());
 445     assert_is_size_aligned(_rs.size(), Metaspace::reserve_alignment());
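
For the DumpSharedSpaces path above, SharedBaseAddress is first rounded up to Metaspace::reserve_alignment() with align_ptr_up() before the reservation is attempted. A minimal standalone sketch of that align-up step, assuming a 64-bit build; the helper below is a stand-in for HotSpot's align_ptr_up, and the base/alignment values are assumed example numbers:

    #include <cstdint>
    #include <cstdio>

    // Stand-in for align_ptr_up(): round a pointer up to the next multiple of
    // 'alignment', which must be a power of two.
    static char* align_up_sketch(char* p, uintptr_t alignment) {
      uintptr_t v = reinterpret_cast<uintptr_t>(p);
      v = (v + alignment - 1) & ~(alignment - 1);
      return reinterpret_cast<char*>(v);
    }

    int main() {
      // Assumed example values: an unaligned base near 32 GB, 16 MB alignment.
      char* base = reinterpret_cast<char*>(UINT64_C(0x800123456));
      uintptr_t alignment = 16 * 1024 * 1024;
      std::printf("aligned base: %p\n",
                  static_cast<void*>(align_up_sketch(base, alignment)));  // 0x801000000
      return 0;
    }
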


3008   return ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax);
3009 }
3010 #endif
3011 
3012 // Try to allocate the metaspace at the requested addr.
3013 void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) {
3014   assert(using_class_space(), "called improperly");
3015   assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
3016   assert(compressed_class_space_size() < KlassEncodingMetaspaceMax,
3017          "Metaspace size is too big");
3018   assert_is_ptr_aligned(requested_addr, _reserve_alignment);
3019   assert_is_ptr_aligned(cds_base, _reserve_alignment);
3020   assert_is_size_aligned(compressed_class_space_size(), _reserve_alignment);
3021 
3022   // Don't use large pages for the class space.
3023   bool large_pages = false;
3024 
3025   ReservedSpace metaspace_rs = ReservedSpace(compressed_class_space_size(),
3026                                              _reserve_alignment,
3027                                              large_pages,
3028                                              requested_addr, 0);
3029   if (!metaspace_rs.is_reserved()) {
3030 #if INCLUDE_CDS
3031     if (UseSharedSpaces) {
3032       size_t increment = align_size_up(1*G, _reserve_alignment);
3033 
3034       // Keep trying to allocate the metaspace, increasing the requested_addr
3035       // by 1GB each time, until we reach an address that will no longer allow
3036       // use of CDS with compressed klass pointers.
3037       char *addr = requested_addr;
3038       while (!metaspace_rs.is_reserved() && (addr + increment > addr) &&
3039              can_use_cds_with_metaspace_addr(addr + increment, cds_base)) {
3040         addr = addr + increment;
3041         metaspace_rs = ReservedSpace(compressed_class_space_size(),
3042                                      _reserve_alignment, large_pages, addr, 0);
3043       }
3044     }
3045 #endif
3046     // If no allocation succeeded, try to allocate the space anywhere. If that
3047     // also fails, exit with an out-of-memory error. At this point we cannot
3048     // fall back to allocating the metaspace as if UseCompressedClassPointers
3049     // were off, because too much initialization that depends on it has already
3050     // happened, so UseCompressedClassPointers cannot be turned off now.
3051     if (!metaspace_rs.is_reserved()) {
3052       metaspace_rs = ReservedSpace(compressed_class_space_size(),
3053                                    _reserve_alignment, large_pages);
3054       if (!metaspace_rs.is_reserved()) {
3055         vm_exit_during_initialization(err_msg("Could not allocate metaspace: " SIZE_FORMAT " bytes",
3056                                               compressed_class_space_size()));
3057       }
3058     }
3059   }
3060 
3061   // If we got here then the metaspace got allocated.
3062   MemTracker::record_virtual_memory_type((address)metaspace_rs.base(), mtClass);
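
When the initial reservation at requested_addr fails and CDS is in use, the loop above probes upward in 1 GB steps (rounded to _reserve_alignment), stopping when a reservation succeeds, the address would wrap around, or the candidate can no longer be covered together with the CDS base by the compressed-Klass encoding. A standalone sketch of that probing pattern; try_reserve_at(), can_cover_with_cds() and the 4 GB limit are assumed stand-ins for illustration, not the HotSpot API:

    #include <cstddef>
    #include <cstdint>

    // Assumed stand-in for the reservation attempt (e.g. an mmap at a fixed
    // hint). It always fails here so the sketch is self-contained and the
    // loop actually iterates.
    static bool try_reserve_at(char* addr, size_t size) {
      (void)addr; (void)size;
      return false;
    }

    // Assumed stand-in for can_use_cds_with_metaspace_addr(): the span from
    // the CDS base up to the candidate address must stay within an assumed
    // 4 GB unscaled narrow-Klass encoding range.
    static bool can_cover_with_cds(char* addr, char* cds_base) {
      const uint64_t limit = UINT64_C(4) * 1024 * 1024 * 1024;
      return (uint64_t)(addr - cds_base) <= limit;
    }

    // Sketch of the probing loop: step upward from requested_addr in
    // increment-sized steps, mirroring the structure of the loop above.
    static char* probe_for_class_space(char* requested_addr, char* cds_base,
                                       size_t size, size_t increment) {
      char* addr = requested_addr;
      bool reserved = try_reserve_at(addr, size);
      while (!reserved && (addr + increment > addr) &&     // stop on wrap-around
             can_cover_with_cds(addr + increment, cds_base)) {
        addr += increment;
        reserved = try_reserve_at(addr, size);
      }
      return reserved ? addr : NULL;
    }
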




 405         MetaspaceGC::allowed_expansion() >= words) {
 406       return true;
 407     }
 408   }
 409 
 410   return false;
 411 }
 412 
 413 // bytes is the size of the associated virtual space.
 414 VirtualSpaceNode::VirtualSpaceNode(size_t bytes) : _top(NULL), _next(NULL), _rs(), _container_count(0) {
 415   assert_is_size_aligned(bytes, Metaspace::reserve_alignment());
 416 
 417 #if INCLUDE_CDS
 418   // This allocates memory with mmap. For DumpSharedSpaces, try to reserve
 419   // at a configurable address, generally at the top of the Java heap, so
 420   // other memory addresses don't conflict.
 421   if (DumpSharedSpaces) {
 422     bool large_pages = false; // No large pages when dumping the CDS archive.
 423     char* shared_base = (char*)align_ptr_up((char*)SharedBaseAddress, Metaspace::reserve_alignment());
 424 
 425     _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages, shared_base);
 426     if (_rs.is_reserved()) {
 427       assert(shared_base == 0 || _rs.base() == shared_base, "should match");
 428     } else {
 429     // Get an mmap region anywhere if reserving at SharedBaseAddress fails.
 430       _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
 431     }
 432     MetaspaceShared::set_shared_rs(&_rs);
 433   } else
 434 #endif
 435   {
 436     bool large_pages = should_commit_large_pages_when_reserving(bytes);
 437 
 438     _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
 439   }
 440 
 441   if (_rs.is_reserved()) {
 442     assert(_rs.base() != NULL, "Catch if we get a NULL address");
 443     assert(_rs.size() != 0, "Catch if we get a 0 size");
 444     assert_is_ptr_aligned(_rs.base(), Metaspace::reserve_alignment());
 445     assert_is_size_aligned(_rs.size(), Metaspace::reserve_alignment());


3008   return ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax);
3009 }
3010 #endif
3011 
3012 // Try to allocate the metaspace at the requested addr.
3013 void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) {
3014   assert(using_class_space(), "called improperly");
3015   assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
3016   assert(compressed_class_space_size() < KlassEncodingMetaspaceMax,
3017          "Metaspace size is too big");
3018   assert_is_ptr_aligned(requested_addr, _reserve_alignment);
3019   assert_is_ptr_aligned(cds_base, _reserve_alignment);
3020   assert_is_size_aligned(compressed_class_space_size(), _reserve_alignment);
3021 
3022   // Don't use large pages for the class space.
3023   bool large_pages = false;
3024 
3025   ReservedSpace metaspace_rs = ReservedSpace(compressed_class_space_size(),
3026                                              _reserve_alignment,
3027                                              large_pages,
3028                                              requested_addr);
3029   if (!metaspace_rs.is_reserved()) {
3030 #if INCLUDE_CDS
3031     if (UseSharedSpaces) {
3032       size_t increment = align_size_up(1*G, _reserve_alignment);
3033 
3034       // Keep trying to allocate the metaspace, increasing the requested_addr
3035       // by 1GB each time, until we reach an address that will no longer allow
3036       // use of CDS with compressed klass pointers.
3037       char *addr = requested_addr;
3038       while (!metaspace_rs.is_reserved() && (addr + increment > addr) &&
3039              can_use_cds_with_metaspace_addr(addr + increment, cds_base)) {
3040         addr = addr + increment;
3041         metaspace_rs = ReservedSpace(compressed_class_space_size(),
3042                                      _reserve_alignment, large_pages, addr);
3043       }
3044     }
3045 #endif
3046     // If no allocation succeeded, try to allocate the space anywhere. If that
3047     // also fails, exit with an out-of-memory error. At this point we cannot
3048     // fall back to allocating the metaspace as if UseCompressedClassPointers
3049     // were off, because too much initialization that depends on it has already
3050     // happened, so UseCompressedClassPointers cannot be turned off now.
3051     if (!metaspace_rs.is_reserved()) {
3052       metaspace_rs = ReservedSpace(compressed_class_space_size(),
3053                                    _reserve_alignment, large_pages);
3054       if (!metaspace_rs.is_reserved()) {
3055         vm_exit_during_initialization(err_msg("Could not allocate metaspace: " SIZE_FORMAT " bytes",
3056                                               compressed_class_space_size()));
3057       }
3058     }
3059   }
3060 
3061   // If we got here then the metaspace got allocated.
3062   MemTracker::record_virtual_memory_type((address)metaspace_rs.base(), mtClass);

